Dataset columns (min/max are string lengths for `stringlengths` columns and value ranges for `int32` columns):

    column           dtype    min    max
    function_name    string   1      63
    docstring        string   50     5.89k
    masked_code      string   50     882k
    implementation   string   169    12.9k
    start_line       int32    1      14.6k
    end_line         int32    16     14.6k
    file_content     string   274    882k
Record 1

function_name: forward

docstring:
    Evaluate qNoisyExpectedImprovement on the candidate set `X`.

    Args:
        X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
            design points each.

    Returns:
        A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
        design points `X`.

masked_code (full module text with the target function masked):
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved r""" Batch acquisition functions using the reparameterization trick in combination with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and [Wilson2017reparam]_ .. [Rezende2014reparam] D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. ICML 2014. .. [Wilson2017reparam] J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth. The reparameterization trick for acquisition functions. ArXiv 2017. """ import math from abc import ABC, abstractmethod from typing import Optional, Union import torch from torch import Tensor from ..exceptions.errors import UnsupportedError from ..models.model import Model from ..sampling.samplers import MCSampler, SobolQMCNormalSampler from ..utils.transforms import ( concatenate_pending_points, match_batch_shape, t_batch_mode_transform, ) from .acquisition import AcquisitionFunction from .objective import IdentityMCObjective, MCAcquisitionObjective from .utils import prune_inferior_points class MCAcquisitionFunction(AcquisitionFunction, ABC): r"""Abstract base class for Monte-Carlo based batch acquisition functions.""" def __init__( self, model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""Constructor for the MCAcquisitionFunction base class. Args: model: A fitted model. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. """ super().__init__(model=model) if sampler is None: sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True) self.add_module("sampler", sampler) if objective is None: objective = IdentityMCObjective() elif not isinstance(objective, MCAcquisitionObjective): raise UnsupportedError( "Only objectives of type MCAcquisitionObjective are supported for " "MC acquisition functions." ) self.add_module("objective", objective) self.set_X_pending(X_pending) @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim design points each, and returns a one-dimensional Tensor with `(b)` elements. Should utilize the result of set_X_pending as needed to account for pending function evaluations. """ pass # pragma: no cover class qExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Expected Improvement. This computes qEI by (1) sampling the joint posterior over q points (2) evaluating the improvement over the current best for each sample (3) maximizing over q (4) averaging over the samples `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qEI = qExpectedImprovement(model, best_f, sampler) >>> qei = qEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Expected Improvement. Args: model: A fitted model. 
best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) obj = (obj - self.best_f).clamp_min(0) q_ei = obj.max(dim=-1)[0].mean(dim=0) return q_ei class qNoisyExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Noisy Expected Improvement. This function does not assume a `best_f` is known (which would require noiseless observations). Instead, it uses samples from the joint posterior over the `q` test points and previously observed points. The improvement over previously observed points is computed for each sample and averaged. `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler) >>> qnei = qNEI(test_X) """ def __init__( self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, ) -> None: r"""q-Noisy Expected Improvement. Args: model: A fitted model. X_baseline: A `r x d`-dim Tensor of `r` design points that have already been observed. These points are considered as the potential best design point. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. prune_baseline: If True, remove points in `X_baseline` that are highly unlikely to be the best point. This can significantly improve performance and is generally recommended. In order to customize pruning parameters, instead manually call `botorch.acquisition.utils.prune_inferior_points` on `X_baseline` before instantiating the acquisition function. 
""" super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if prune_baseline: X_baseline = prune_inferior_points( model=model, X=X_baseline, objective=objective ) self.register_buffer("X_baseline", X_baseline) # MASKED: forward function (lines 212-233) class qProbabilityOfImprovement(MCAcquisitionFunction): r"""MC-based batch Probability of Improvement. Estimates the probability of improvement over the current best observed value by sampling from the joint posterior distribution of the q-batch. MC-based estimates of a probability involves taking expectation of an indicator function; to support auto-differntiation, the indicator is replaced with a sigmoid function with temperature parameter `tau`. `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qPI = qProbabilityOfImprovement(model, best_f, sampler) >>> qpi = qPI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, ) -> None: r"""q-Probability of Improvement. Args: model: A fitted model. best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. tau: The temperature parameter used in the sigmoid approximation of the step function. Smaller values yield more accurate approximations of the function, but result in gradients estimates with higher variance. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) if not torch.is_tensor(tau): tau = torch.tensor(float(tau)) self.register_buffer("tau", tau) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val class qSimpleRegret(MCAcquisitionFunction): r"""MC-based batch Simple Regret. Samples from the joint posterior over the q-batch and computes the simple regret. `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. 
Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
implementation (lines 212-233 of the file):

    @concatenate_pending_points
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.

        Args:
            X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
                design points each.

        Returns:
            A `(b)`-dim Tensor of Noisy Expected Improvement values at the
            given design points `X`.
        """
        q = X.shape[-2]
        X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
        # TODO (T41248036): Implement more efficient way to compute posterior
        # over both training and test points in GPyTorch
        posterior = self.model.posterior(X_full)
        samples = self.sampler(posterior)
        obj = self.objective(samples)
        diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
        return diffs.clamp_min(0).mean(dim=0)

start_line: 212
end_line: 233
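For context, a short usage sketch of the `forward` shown above, mirroring the `Example` block in the class docstring; the import paths follow the module layout in this file (an older BoTorch layout), and the toy data, shapes, and sample count are illustrative assumptions:

```python
import torch
from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.sampling.samplers import SobolQMCNormalSampler

# Toy training data; hyperparameter fitting is omitted for brevity.
train_X = torch.rand(20, 2)
train_Y = 1.0 - (train_X - 0.5).norm(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

sampler = SobolQMCNormalSampler(num_samples=256)
qNEI = qNoisyExpectedImprovement(model, X_baseline=train_X, sampler=sampler)

test_X = torch.rand(3, 2, 2)  # b=3 t-batches of q=2 points in d=2
values = qNEI(test_X)         # shape (3,): one qNEI value per t-batch
```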
file_content: the complete module text; identical to the masked_code above with the masked `forward` restored, so the duplicate dump is omitted.
Record 2

function_name: __init__

docstring:
    q-Probability of Improvement.

    Args:
        model: A fitted model.
        best_f: The best objective value observed so far (assumed noiseless).
        sampler: The sampler used to draw base samples. Defaults to
            `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
        objective: The MCAcquisitionObjective under which the samples are
            evaluated. Defaults to `IdentityMCObjective()`.
        X_pending: A `m x d`-dim Tensor of `m` design points that have been
            submitted for function evaluation but have not yet been evaluated.
            Concatenated into X upon forward call. Copied and set to have no
            gradient.
        tau: The temperature parameter used in the sigmoid approximation of
            the step function. Smaller values yield more accurate
            approximations of the function, but result in gradient estimates
            with higher variance.
masked_code: the same module text as in Record 1, here with this `__init__` replaced by a `# MASKED: __init__ function (lines 255-290)` marker; the near-verbatim duplicate dump is omitted.
implementation (lines 255-290 of the file):

    def __init__(
        self,
        model: Model,
        best_f: Union[float, Tensor],
        sampler: Optional[MCSampler] = None,
        objective: Optional[MCAcquisitionObjective] = None,
        X_pending: Optional[Tensor] = None,
        tau: float = 1e-3,
    ) -> None:
        r"""q-Probability of Improvement.

        Args:
            model: A fitted model.
            best_f: The best objective value observed so far (assumed
                noiseless).
            sampler: The sampler used to draw base samples. Defaults to
                `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
            objective: The MCAcquisitionObjective under which the samples are
                evaluated. Defaults to `IdentityMCObjective()`.
            X_pending: A `m x d`-dim Tensor of `m` design points that have
                been submitted for function evaluation but have not yet been
                evaluated. Concatenated into X upon forward call. Copied and
                set to have no gradient.
            tau: The temperature parameter used in the sigmoid approximation
                of the step function. Smaller values yield more accurate
                approximations of the function, but result in gradient
                estimates with higher variance.
        """
        super().__init__(
            model=model, sampler=sampler, objective=objective, X_pending=X_pending
        )
        if not torch.is_tensor(best_f):
            best_f = torch.tensor(float(best_f))
        self.register_buffer("best_f", best_f)
        if not torch.is_tensor(tau):
            tau = torch.tensor(float(tau))
        self.register_buffer("tau", tau)

start_line: 255
end_line: 290
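A short construction sketch for the `__init__` above, again following the docstring's `Example` block; the toy data and keyword style are assumptions. It shows that `best_f` and `tau` end up registered as buffers, so they move with the module on `.to()` calls:

```python
import torch
from botorch.acquisition.monte_carlo import qProbabilityOfImprovement
from botorch.models import SingleTaskGP
from botorch.sampling.samplers import SobolQMCNormalSampler

train_X = torch.rand(20, 2)
train_Y = torch.sin(train_X.sum(dim=-1, keepdim=True))  # toy observations
model = SingleTaskGP(train_X, train_Y)

best_f = train_Y.max()  # scalar tensor holding the best observed value
sampler = SobolQMCNormalSampler(num_samples=256)
qPI = qProbabilityOfImprovement(model, best_f=best_f, sampler=sampler, tau=1e-3)

# best_f and tau are plain buffers on the acquisition module.
print([name for name, _ in qPI.named_buffers()])
```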
file_content: identical to the module text in Record 1; the verbatim duplicate dump is omitted.
Record 3

function_name: forward

docstring:
    Evaluate qProbabilityOfImprovement on the candidate set `X`.

    Args:
        X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
            design points each.

    Returns:
        A `(b)`-dim Tensor of Probability of Improvement values at the given
        design points `X`.
masked_code: the same module text once more, here with `qProbabilityOfImprovement.forward` replaced by a `# MASKED: forward function (lines 292-310)` marker; the near-verbatim duplicate dump (cut off in this extract) is omitted, along with the remaining fields of this record.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
@concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val
292
310
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved r""" Batch acquisition functions using the reparameterization trick in combination with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and [Wilson2017reparam]_ .. [Rezende2014reparam] D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. ICML 2014. .. [Wilson2017reparam] J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth. The reparameterization trick for acquisition functions. ArXiv 2017. """ import math from abc import ABC, abstractmethod from typing import Optional, Union import torch from torch import Tensor from ..exceptions.errors import UnsupportedError from ..models.model import Model from ..sampling.samplers import MCSampler, SobolQMCNormalSampler from ..utils.transforms import ( concatenate_pending_points, match_batch_shape, t_batch_mode_transform, ) from .acquisition import AcquisitionFunction from .objective import IdentityMCObjective, MCAcquisitionObjective from .utils import prune_inferior_points class MCAcquisitionFunction(AcquisitionFunction, ABC): r"""Abstract base class for Monte-Carlo based batch acquisition functions.""" def __init__( self, model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""Constructor for the MCAcquisitionFunction base class. Args: model: A fitted model. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. """ super().__init__(model=model) if sampler is None: sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True) self.add_module("sampler", sampler) if objective is None: objective = IdentityMCObjective() elif not isinstance(objective, MCAcquisitionObjective): raise UnsupportedError( "Only objectives of type MCAcquisitionObjective are supported for " "MC acquisition functions." ) self.add_module("objective", objective) self.set_X_pending(X_pending) @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim design points each, and returns a one-dimensional Tensor with `(b)` elements. Should utilize the result of set_X_pending as needed to account for pending function evaluations. """ pass # pragma: no cover class qExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Expected Improvement. This computes qEI by (1) sampling the joint posterior over q points (2) evaluating the improvement over the current best for each sample (3) maximizing over q (4) averaging over the samples `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qEI = qExpectedImprovement(model, best_f, sampler) >>> qei = qEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Expected Improvement. Args: model: A fitted model. 
best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) obj = (obj - self.best_f).clamp_min(0) q_ei = obj.max(dim=-1)[0].mean(dim=0) return q_ei class qNoisyExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Noisy Expected Improvement. This function does not assume a `best_f` is known (which would require noiseless observations). Instead, it uses samples from the joint posterior over the `q` test points and previously observed points. The improvement over previously observed points is computed for each sample and averaged. `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler) >>> qnei = qNEI(test_X) """ def __init__( self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, ) -> None: r"""q-Noisy Expected Improvement. Args: model: A fitted model. X_baseline: A `r x d`-dim Tensor of `r` design points that have already been observed. These points are considered as the potential best design point. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. prune_baseline: If True, remove points in `X_baseline` that are highly unlikely to be the best point. This can significantly improve performance and is generally recommended. In order to customize pruning parameters, instead manually call `botorch.acquisition.utils.prune_inferior_points` on `X_baseline` before instantiating the acquisition function. 
""" super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if prune_baseline: X_baseline = prune_inferior_points( model=model, X=X_baseline, objective=objective ) self.register_buffer("X_baseline", X_baseline) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Noisy Expected Improvement values at the given design points `X`. """ q = X.shape[-2] X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2) # TODO (T41248036): Implement more efficient way to compute posterior # over both training and test points in GPyTorch posterior = self.model.posterior(X_full) samples = self.sampler(posterior) obj = self.objective(samples) diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0] return diffs.clamp_min(0).mean(dim=0) class qProbabilityOfImprovement(MCAcquisitionFunction): r"""MC-based batch Probability of Improvement. Estimates the probability of improvement over the current best observed value by sampling from the joint posterior distribution of the q-batch. MC-based estimates of a probability involves taking expectation of an indicator function; to support auto-differntiation, the indicator is replaced with a sigmoid function with temperature parameter `tau`. `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qPI = qProbabilityOfImprovement(model, best_f, sampler) >>> qpi = qPI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, ) -> None: r"""q-Probability of Improvement. Args: model: A fitted model. best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. tau: The temperature parameter used in the sigmoid approximation of the step function. Smaller values yield more accurate approximations of the function, but result in gradients estimates with higher variance. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) if not torch.is_tensor(tau): tau = torch.tensor(float(tau)) self.register_buffer("tau", tau) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. 
""" posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val class qSimpleRegret(MCAcquisitionFunction): r"""MC-based batch Simple Regret. Samples from the joint posterior over the q-batch and computes the simple regret. `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
forward
Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`.
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved r""" Batch acquisition functions using the reparameterization trick in combination with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and [Wilson2017reparam]_ .. [Rezende2014reparam] D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. ICML 2014. .. [Wilson2017reparam] J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth. The reparameterization trick for acquisition functions. ArXiv 2017. """ import math from abc import ABC, abstractmethod from typing import Optional, Union import torch from torch import Tensor from ..exceptions.errors import UnsupportedError from ..models.model import Model from ..sampling.samplers import MCSampler, SobolQMCNormalSampler from ..utils.transforms import ( concatenate_pending_points, match_batch_shape, t_batch_mode_transform, ) from .acquisition import AcquisitionFunction from .objective import IdentityMCObjective, MCAcquisitionObjective from .utils import prune_inferior_points class MCAcquisitionFunction(AcquisitionFunction, ABC): r"""Abstract base class for Monte-Carlo based batch acquisition functions.""" def __init__( self, model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""Constructor for the MCAcquisitionFunction base class. Args: model: A fitted model. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. """ super().__init__(model=model) if sampler is None: sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True) self.add_module("sampler", sampler) if objective is None: objective = IdentityMCObjective() elif not isinstance(objective, MCAcquisitionObjective): raise UnsupportedError( "Only objectives of type MCAcquisitionObjective are supported for " "MC acquisition functions." ) self.add_module("objective", objective) self.set_X_pending(X_pending) @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim design points each, and returns a one-dimensional Tensor with `(b)` elements. Should utilize the result of set_X_pending as needed to account for pending function evaluations. """ pass # pragma: no cover class qExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Expected Improvement. This computes qEI by (1) sampling the joint posterior over q points (2) evaluating the improvement over the current best for each sample (3) maximizing over q (4) averaging over the samples `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qEI = qExpectedImprovement(model, best_f, sampler) >>> qei = qEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Expected Improvement. Args: model: A fitted model. 
best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) obj = (obj - self.best_f).clamp_min(0) q_ei = obj.max(dim=-1)[0].mean(dim=0) return q_ei class qNoisyExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Noisy Expected Improvement. This function does not assume a `best_f` is known (which would require noiseless observations). Instead, it uses samples from the joint posterior over the `q` test points and previously observed points. The improvement over previously observed points is computed for each sample and averaged. `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler) >>> qnei = qNEI(test_X) """ def __init__( self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, ) -> None: r"""q-Noisy Expected Improvement. Args: model: A fitted model. X_baseline: A `r x d`-dim Tensor of `r` design points that have already been observed. These points are considered as the potential best design point. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. prune_baseline: If True, remove points in `X_baseline` that are highly unlikely to be the best point. This can significantly improve performance and is generally recommended. In order to customize pruning parameters, instead manually call `botorch.acquisition.utils.prune_inferior_points` on `X_baseline` before instantiating the acquisition function. 
""" super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if prune_baseline: X_baseline = prune_inferior_points( model=model, X=X_baseline, objective=objective ) self.register_buffer("X_baseline", X_baseline) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Noisy Expected Improvement values at the given design points `X`. """ q = X.shape[-2] X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2) # TODO (T41248036): Implement more efficient way to compute posterior # over both training and test points in GPyTorch posterior = self.model.posterior(X_full) samples = self.sampler(posterior) obj = self.objective(samples) diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0] return diffs.clamp_min(0).mean(dim=0) class qProbabilityOfImprovement(MCAcquisitionFunction): r"""MC-based batch Probability of Improvement. Estimates the probability of improvement over the current best observed value by sampling from the joint posterior distribution of the q-batch. MC-based estimates of a probability involves taking expectation of an indicator function; to support auto-differntiation, the indicator is replaced with a sigmoid function with temperature parameter `tau`. `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qPI = qProbabilityOfImprovement(model, best_f, sampler) >>> qpi = qPI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, ) -> None: r"""q-Probability of Improvement. Args: model: A fitted model. best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. tau: The temperature parameter used in the sigmoid approximation of the step function. Smaller values yield more accurate approximations of the function, but result in gradients estimates with higher variance. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) if not torch.is_tensor(tau): tau = torch.tensor(float(tau)) self.register_buffer("tau", tau) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. 
""" posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val class qSimpleRegret(MCAcquisitionFunction): r"""MC-based batch Simple Regret. Samples from the joint posterior over the q-batch and computes the simple regret. `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ # MASKED: forward function (lines 328-345) class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
@concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val
328
345
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved r""" Batch acquisition functions using the reparameterization trick in combination with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and [Wilson2017reparam]_ .. [Rezende2014reparam] D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. ICML 2014. .. [Wilson2017reparam] J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth. The reparameterization trick for acquisition functions. ArXiv 2017. """ import math from abc import ABC, abstractmethod from typing import Optional, Union import torch from torch import Tensor from ..exceptions.errors import UnsupportedError from ..models.model import Model from ..sampling.samplers import MCSampler, SobolQMCNormalSampler from ..utils.transforms import ( concatenate_pending_points, match_batch_shape, t_batch_mode_transform, ) from .acquisition import AcquisitionFunction from .objective import IdentityMCObjective, MCAcquisitionObjective from .utils import prune_inferior_points class MCAcquisitionFunction(AcquisitionFunction, ABC): r"""Abstract base class for Monte-Carlo based batch acquisition functions.""" def __init__( self, model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""Constructor for the MCAcquisitionFunction base class. Args: model: A fitted model. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. """ super().__init__(model=model) if sampler is None: sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True) self.add_module("sampler", sampler) if objective is None: objective = IdentityMCObjective() elif not isinstance(objective, MCAcquisitionObjective): raise UnsupportedError( "Only objectives of type MCAcquisitionObjective are supported for " "MC acquisition functions." ) self.add_module("objective", objective) self.set_X_pending(X_pending) @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim design points each, and returns a one-dimensional Tensor with `(b)` elements. Should utilize the result of set_X_pending as needed to account for pending function evaluations. """ pass # pragma: no cover class qExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Expected Improvement. This computes qEI by (1) sampling the joint posterior over q points (2) evaluating the improvement over the current best for each sample (3) maximizing over q (4) averaging over the samples `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qEI = qExpectedImprovement(model, best_f, sampler) >>> qei = qEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Expected Improvement. Args: model: A fitted model. 
best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) obj = (obj - self.best_f).clamp_min(0) q_ei = obj.max(dim=-1)[0].mean(dim=0) return q_ei class qNoisyExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Noisy Expected Improvement. This function does not assume a `best_f` is known (which would require noiseless observations). Instead, it uses samples from the joint posterior over the `q` test points and previously observed points. The improvement over previously observed points is computed for each sample and averaged. `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler) >>> qnei = qNEI(test_X) """ def __init__( self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, ) -> None: r"""q-Noisy Expected Improvement. Args: model: A fitted model. X_baseline: A `r x d`-dim Tensor of `r` design points that have already been observed. These points are considered as the potential best design point. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. prune_baseline: If True, remove points in `X_baseline` that are highly unlikely to be the best point. This can significantly improve performance and is generally recommended. In order to customize pruning parameters, instead manually call `botorch.acquisition.utils.prune_inferior_points` on `X_baseline` before instantiating the acquisition function. 
""" super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if prune_baseline: X_baseline = prune_inferior_points( model=model, X=X_baseline, objective=objective ) self.register_buffer("X_baseline", X_baseline) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Noisy Expected Improvement values at the given design points `X`. """ q = X.shape[-2] X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2) # TODO (T41248036): Implement more efficient way to compute posterior # over both training and test points in GPyTorch posterior = self.model.posterior(X_full) samples = self.sampler(posterior) obj = self.objective(samples) diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0] return diffs.clamp_min(0).mean(dim=0) class qProbabilityOfImprovement(MCAcquisitionFunction): r"""MC-based batch Probability of Improvement. Estimates the probability of improvement over the current best observed value by sampling from the joint posterior distribution of the q-batch. MC-based estimates of a probability involves taking expectation of an indicator function; to support auto-differntiation, the indicator is replaced with a sigmoid function with temperature parameter `tau`. `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qPI = qProbabilityOfImprovement(model, best_f, sampler) >>> qpi = qPI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, ) -> None: r"""q-Probability of Improvement. Args: model: A fitted model. best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. tau: The temperature parameter used in the sigmoid approximation of the step function. Smaller values yield more accurate approximations of the function, but result in gradients estimates with higher variance. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) if not torch.is_tensor(tau): tau = torch.tensor(float(tau)) self.register_buffer("tau", tau) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. 
""" posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val class qSimpleRegret(MCAcquisitionFunction): r"""MC-based batch Simple Regret. Samples from the joint posterior over the q-batch and computes the simple regret. `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
forward
Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`.
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved r""" Batch acquisition functions using the reparameterization trick in combination with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and [Wilson2017reparam]_ .. [Rezende2014reparam] D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. ICML 2014. .. [Wilson2017reparam] J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth. The reparameterization trick for acquisition functions. ArXiv 2017. """ import math from abc import ABC, abstractmethod from typing import Optional, Union import torch from torch import Tensor from ..exceptions.errors import UnsupportedError from ..models.model import Model from ..sampling.samplers import MCSampler, SobolQMCNormalSampler from ..utils.transforms import ( concatenate_pending_points, match_batch_shape, t_batch_mode_transform, ) from .acquisition import AcquisitionFunction from .objective import IdentityMCObjective, MCAcquisitionObjective from .utils import prune_inferior_points class MCAcquisitionFunction(AcquisitionFunction, ABC): r"""Abstract base class for Monte-Carlo based batch acquisition functions.""" def __init__( self, model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""Constructor for the MCAcquisitionFunction base class. Args: model: A fitted model. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. """ super().__init__(model=model) if sampler is None: sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True) self.add_module("sampler", sampler) if objective is None: objective = IdentityMCObjective() elif not isinstance(objective, MCAcquisitionObjective): raise UnsupportedError( "Only objectives of type MCAcquisitionObjective are supported for " "MC acquisition functions." ) self.add_module("objective", objective) self.set_X_pending(X_pending) @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim design points each, and returns a one-dimensional Tensor with `(b)` elements. Should utilize the result of set_X_pending as needed to account for pending function evaluations. """ pass # pragma: no cover class qExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Expected Improvement. This computes qEI by (1) sampling the joint posterior over q points (2) evaluating the improvement over the current best for each sample (3) maximizing over q (4) averaging over the samples `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qEI = qExpectedImprovement(model, best_f, sampler) >>> qei = qEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Expected Improvement. Args: model: A fitted model. 
best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) obj = (obj - self.best_f).clamp_min(0) q_ei = obj.max(dim=-1)[0].mean(dim=0) return q_ei class qNoisyExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Noisy Expected Improvement. This function does not assume a `best_f` is known (which would require noiseless observations). Instead, it uses samples from the joint posterior over the `q` test points and previously observed points. The improvement over previously observed points is computed for each sample and averaged. `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler) >>> qnei = qNEI(test_X) """ def __init__( self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, ) -> None: r"""q-Noisy Expected Improvement. Args: model: A fitted model. X_baseline: A `r x d`-dim Tensor of `r` design points that have already been observed. These points are considered as the potential best design point. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. prune_baseline: If True, remove points in `X_baseline` that are highly unlikely to be the best point. This can significantly improve performance and is generally recommended. In order to customize pruning parameters, instead manually call `botorch.acquisition.utils.prune_inferior_points` on `X_baseline` before instantiating the acquisition function. 
""" super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if prune_baseline: X_baseline = prune_inferior_points( model=model, X=X_baseline, objective=objective ) self.register_buffer("X_baseline", X_baseline) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Noisy Expected Improvement values at the given design points `X`. """ q = X.shape[-2] X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2) # TODO (T41248036): Implement more efficient way to compute posterior # over both training and test points in GPyTorch posterior = self.model.posterior(X_full) samples = self.sampler(posterior) obj = self.objective(samples) diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0] return diffs.clamp_min(0).mean(dim=0) class qProbabilityOfImprovement(MCAcquisitionFunction): r"""MC-based batch Probability of Improvement. Estimates the probability of improvement over the current best observed value by sampling from the joint posterior distribution of the q-batch. MC-based estimates of a probability involves taking expectation of an indicator function; to support auto-differntiation, the indicator is replaced with a sigmoid function with temperature parameter `tau`. `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qPI = qProbabilityOfImprovement(model, best_f, sampler) >>> qpi = qPI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, ) -> None: r"""q-Probability of Improvement. Args: model: A fitted model. best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. tau: The temperature parameter used in the sigmoid approximation of the step function. Smaller values yield more accurate approximations of the function, but result in gradients estimates with higher variance. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) if not torch.is_tensor(tau): tau = torch.tensor(float(tau)) self.register_buffer("tau", tau) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. 
""" posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val class qSimpleRegret(MCAcquisitionFunction): r"""MC-based batch Simple Regret. Samples from the joint posterior over the q-batch and computes the simple regret. `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) # MASKED: forward function (lines 391-409)
@concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
391
409
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved r""" Batch acquisition functions using the reparameterization trick in combination with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and [Wilson2017reparam]_ .. [Rezende2014reparam] D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and approximate inference in deep generative models. ICML 2014. .. [Wilson2017reparam] J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth. The reparameterization trick for acquisition functions. ArXiv 2017. """ import math from abc import ABC, abstractmethod from typing import Optional, Union import torch from torch import Tensor from ..exceptions.errors import UnsupportedError from ..models.model import Model from ..sampling.samplers import MCSampler, SobolQMCNormalSampler from ..utils.transforms import ( concatenate_pending_points, match_batch_shape, t_batch_mode_transform, ) from .acquisition import AcquisitionFunction from .objective import IdentityMCObjective, MCAcquisitionObjective from .utils import prune_inferior_points class MCAcquisitionFunction(AcquisitionFunction, ABC): r"""Abstract base class for Monte-Carlo based batch acquisition functions.""" def __init__( self, model: Model, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""Constructor for the MCAcquisitionFunction base class. Args: model: A fitted model. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. """ super().__init__(model=model) if sampler is None: sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True) self.add_module("sampler", sampler) if objective is None: objective = IdentityMCObjective() elif not isinstance(objective, MCAcquisitionObjective): raise UnsupportedError( "Only objectives of type MCAcquisitionObjective are supported for " "MC acquisition functions." ) self.add_module("objective", objective) self.set_X_pending(X_pending) @abstractmethod def forward(self, X: Tensor) -> Tensor: r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim design points each, and returns a one-dimensional Tensor with `(b)` elements. Should utilize the result of set_X_pending as needed to account for pending function evaluations. """ pass # pragma: no cover class qExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Expected Improvement. This computes qEI by (1) sampling the joint posterior over q points (2) evaluating the improvement over the current best for each sample (3) maximizing over q (4) averaging over the samples `qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qEI = qExpectedImprovement(model, best_f, sampler) >>> qei = qEI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Expected Improvement. Args: model: A fitted model. 
best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Expected Improvement values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) obj = (obj - self.best_f).clamp_min(0) q_ei = obj.max(dim=-1)[0].mean(dim=0) return q_ei class qNoisyExpectedImprovement(MCAcquisitionFunction): r"""MC-based batch Noisy Expected Improvement. This function does not assume a `best_f` is known (which would require noiseless observations). Instead, it uses samples from the joint posterior over the `q` test points and previously observed points. The improvement over previously observed points is computed for each sample and averaged. `qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where `(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler) >>> qnei = qNEI(test_X) """ def __init__( self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, prune_baseline: bool = False, ) -> None: r"""q-Noisy Expected Improvement. Args: model: A fitted model. X_baseline: A `r x d`-dim Tensor of `r` design points that have already been observed. These points are considered as the potential best design point. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`. objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. prune_baseline: If True, remove points in `X_baseline` that are highly unlikely to be the best point. This can significantly improve performance and is generally recommended. In order to customize pruning parameters, instead manually call `botorch.acquisition.utils.prune_inferior_points` on `X_baseline` before instantiating the acquisition function. 
""" super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if prune_baseline: X_baseline = prune_inferior_points( model=model, X=X_baseline, objective=objective ) self.register_buffer("X_baseline", X_baseline) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Noisy Expected Improvement values at the given design points `X`. """ q = X.shape[-2] X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2) # TODO (T41248036): Implement more efficient way to compute posterior # over both training and test points in GPyTorch posterior = self.model.posterior(X_full) samples = self.sampler(posterior) obj = self.objective(samples) diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0] return diffs.clamp_min(0).mean(dim=0) class qProbabilityOfImprovement(MCAcquisitionFunction): r"""MC-based batch Probability of Improvement. Estimates the probability of improvement over the current best observed value by sampling from the joint posterior distribution of the q-batch. MC-based estimates of a probability involves taking expectation of an indicator function; to support auto-differntiation, the indicator is replaced with a sigmoid function with temperature parameter `tau`. `qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> best_f = train_Y.max()[0] >>> sampler = SobolQMCNormalSampler(1000) >>> qPI = qProbabilityOfImprovement(model, best_f, sampler) >>> qpi = qPI(test_X) """ def __init__( self, model: Model, best_f: Union[float, Tensor], sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, tau: float = 1e-3, ) -> None: r"""q-Probability of Improvement. Args: model: A fitted model. best_f: The best objective value observed so far (assumed noiseless). sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. tau: The temperature parameter used in the sigmoid approximation of the step function. Smaller values yield more accurate approximations of the function, but result in gradients estimates with higher variance. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) if not torch.is_tensor(best_f): best_f = torch.tensor(float(best_f)) self.register_buffer("best_f", best_f) if not torch.is_tensor(tau): tau = torch.tensor(float(tau)) self.register_buffer("tau", tau) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qProbabilityOfImprovement on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Probability of Improvement values at the given design points `X`. 
""" posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) max_obj = obj.max(dim=-1)[0] val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0) return val class qSimpleRegret(MCAcquisitionFunction): r"""MC-based batch Simple Regret. Samples from the joint posterior over the q-batch and computes the simple regret. `qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)` Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qSR = qSimpleRegret(model, sampler) >>> qsr = qSR(test_X) """ @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qSimpleRegret on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Simple Regret values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) val = obj.max(dim=-1)[0].mean(dim=0) return val class qUpperConfidenceBound(MCAcquisitionFunction): r"""MC-based batch Upper Confidence Bound. Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A of [Wilson2017reparam].) `qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)` and `f(X)` has distribution `N(mu, Sigma)`. Example: >>> model = SingleTaskGP(train_X, train_Y) >>> sampler = SobolQMCNormalSampler(1000) >>> qUCB = qUpperConfidenceBound(model, 0.1, sampler) >>> qucb = qUCB(test_X) """ def __init__( self, model: Model, beta: float, sampler: Optional[MCSampler] = None, objective: Optional[MCAcquisitionObjective] = None, X_pending: Optional[Tensor] = None, ) -> None: r"""q-Upper Confidence Bound. Args: model: A fitted model. beta: Controls tradeoff between mean and standard deviation in UCB. sampler: The sampler used to draw base samples. Defaults to `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)` objective: The MCAcquisitionObjective under which the samples are evaluated. Defaults to `IdentityMCObjective()`. X_pending: A `m x d`-dim Tensor of `m` design points that have points that have been submitted for function evaluation but have not yet been evaluated. Concatenated into X upon forward call. Copied and set to have no gradient. """ super().__init__( model=model, sampler=sampler, objective=objective, X_pending=X_pending ) self.beta_prime = math.sqrt(beta * math.pi / 2) @concatenate_pending_points @t_batch_mode_transform() def forward(self, X: Tensor) -> Tensor: r"""Evaluate qUpperConfidenceBound on the candidate set `X`. Args: X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim design points each. Returns: A `(b)`-dim Tensor of Upper Confidence Bound values at the given design points `X`. """ posterior = self.model.posterior(X) samples = self.sampler(posterior) obj = self.objective(samples) mean = obj.mean(dim=0) ucb_samples = mean + self.beta_prime * (obj - mean).abs() return ucb_samples.max(dim=-1)[0].mean(dim=0)
draw_ocr
Visualize the results of OCR detection and recognition args: image(Image|array): RGB image boxes(list): boxes with shape(N, 4, 2) txts(list): the recognized texts scores(list): the scores corresponding to txts drop_score(float): only boxes whose score is at least drop_score will be visualized font_path: the path of the font used to draw text return(array): the visualized img
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import sys import cv2 import numpy as np import json from PIL import Image, ImageDraw, ImageFont import math from paddle import inference def parse_args(): def str2bool(v): return v.lower() in ("true", "t", "1") parser = argparse.ArgumentParser() # params for prediction engine parser.add_argument("--use_gpu", type=str2bool, default=True) parser.add_argument("--ir_optim", type=str2bool, default=True) parser.add_argument("--use_tensorrt", type=str2bool, default=False) parser.add_argument("--use_fp16", type=str2bool, default=False) parser.add_argument("--gpu_mem", type=int, default=500) # params for text detector parser.add_argument("--image_dir", type=str) parser.add_argument("--det_algorithm", type=str, default='DB') parser.add_argument("--det_model_dir", type=str) parser.add_argument("--det_limit_side_len", type=float, default=960) parser.add_argument("--det_limit_type", type=str, default='max') # DB parmas parser.add_argument("--det_db_thresh", type=float, default=0.3) parser.add_argument("--det_db_box_thresh", type=float, default=0.6) parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5) parser.add_argument("--max_batch_size", type=int, default=10) parser.add_argument("--use_dilation", type=bool, default=False) parser.add_argument("--det_db_score_mode", type=str, default="fast") # EAST parmas parser.add_argument("--det_east_score_thresh", type=float, default=0.8) parser.add_argument("--det_east_cover_thresh", type=float, default=0.1) parser.add_argument("--det_east_nms_thresh", type=float, default=0.2) # SAST parmas parser.add_argument("--det_sast_score_thresh", type=float, default=0.5) parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2) parser.add_argument("--det_sast_polygon", type=bool, default=False) # params for text recognizer parser.add_argument("--rec_algorithm", type=str, default='CRNN') parser.add_argument("--rec_model_dir", type=str) parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320") parser.add_argument("--rec_char_type", type=str, default='ch') parser.add_argument("--rec_batch_num", type=int, default=6) parser.add_argument("--max_text_length", type=int, default=25) parser.add_argument( "--rec_char_dict_path", type=str, default="./ppocr/utils/ppocr_keys_v1.txt") parser.add_argument("--use_space_char", type=str2bool, default=True) parser.add_argument( "--vis_font_path", type=str, default="./doc/fonts/simfang.ttf") parser.add_argument("--drop_score", type=float, default=0.5) # params for e2e parser.add_argument("--e2e_algorithm", type=str, default='PGNet') parser.add_argument("--e2e_model_dir", type=str) parser.add_argument("--e2e_limit_side_len", type=float, default=768) parser.add_argument("--e2e_limit_type", type=str, default='max') # PGNet parmas parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5) parser.add_argument( "--e2e_char_dict_path", type=str, 
default="./ppocr/utils/ic15_dict.txt") parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext') parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True) parser.add_argument("--e2e_pgnet_mode", type=str, default='fast') # params for text classifier parser.add_argument("--use_angle_cls", type=str2bool, default=False) parser.add_argument("--cls_model_dir", type=str) parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192") parser.add_argument("--label_list", type=list, default=['0', '180']) parser.add_argument("--cls_batch_num", type=int, default=6) parser.add_argument("--cls_thresh", type=float, default=0.9) parser.add_argument("--enable_mkldnn", type=str2bool, default=False) parser.add_argument("--cpu_threads", type=int, default=10) parser.add_argument("--use_pdserving", type=str2bool, default=False) parser.add_argument("--use_mp", type=str2bool, default=False) parser.add_argument("--total_process_num", type=int, default=1) parser.add_argument("--process_id", type=int, default=0) return parser.parse_args() def create_predictor(args, mode, logger): if mode == "det": model_dir = args.det_model_dir elif mode == 'cls': model_dir = args.cls_model_dir elif mode == 'rec': model_dir = args.rec_model_dir else: model_dir = args.e2e_model_dir if model_dir is None: logger.info("not find {} model file path {}".format(mode, model_dir)) sys.exit(0) model_file_path = model_dir + "/inference.pdmodel" params_file_path = model_dir + "/inference.pdiparams" if not os.path.exists(model_file_path): logger.info("not find model file path {}".format(model_file_path)) sys.exit(0) if not os.path.exists(params_file_path): logger.info("not find params file path {}".format(params_file_path)) sys.exit(0) config = inference.Config(model_file_path, params_file_path) if args.use_gpu: config.enable_use_gpu(args.gpu_mem, 0) if args.use_tensorrt: config.enable_tensorrt_engine( precision_mode=inference.PrecisionType.Half if args.use_fp16 else inference.PrecisionType.Float32, max_batch_size=args.max_batch_size) else: config.disable_gpu() cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10 config.set_cpu_math_library_num_threads(cpu_threads) if args.enable_mkldnn: # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() # enable memory optim config.enable_memory_optim() config.disable_glog_info() config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") config.switch_use_feed_fetch_ops(False) # create predictor predictor = inference.create_predictor(config) input_names = predictor.get_input_names() for name in input_names: input_tensor = predictor.get_input_handle(name) output_names = predictor.get_output_names() output_tensors = [] for output_name in output_names: output_tensor = predictor.get_output_handle(output_name) output_tensors.append(output_tensor) return predictor, input_tensor, output_tensors def draw_e2e_res(dt_boxes, strs, img_path): src_im = cv2.imread(img_path) for box, str in zip(dt_boxes, strs): box = box.astype(np.int32).reshape((-1, 1, 2)) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) cv2.putText( src_im, str, org=(int(box[0, 0, 0]), int(box[0, 0, 1])), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.7, color=(0, 255, 0), thickness=1) return src_im def draw_text_det_res(dt_boxes, img_path): src_im = cv2.imread(img_path) for box in dt_boxes: box = np.array(box).astype(np.int32).reshape(-1, 2) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) 
return src_im def resize_img(img, input_size=600): """ resize img and limit the longest side of the image to input_size """ img = np.array(img) im_shape = img.shape im_size_max = np.max(im_shape[0:2]) im_scale = float(input_size) / float(im_size_max) img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale) return img # MASKED: draw_ocr function (lines 207-245) def draw_ocr_box_txt(image, boxes, txts, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): h, w = image.height, image.width img_left = image.copy() img_right = Image.new('RGB', (w, h), (255, 255, 255)) import random random.seed(0) draw_left = ImageDraw.Draw(img_left) draw_right = ImageDraw.Draw(img_right) for idx, (box, txt) in enumerate(zip(boxes, txts)): if scores is not None and scores[idx] < drop_score: continue color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw_left.polygon(box, fill=color) draw_right.polygon( [ box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1] ], outline=color) box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][ 1])**2) box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][ 1])**2) if box_height > 2 * box_width: font_size = max(int(box_width * 0.9), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") cur_y = box[0][1] for c in txt: char_size = font.getsize(c) draw_right.text( (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font) cur_y += char_size[1] else: font_size = max(int(box_height * 0.8), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") draw_right.text( [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font) img_left = Image.blend(image, img_left, 0.5) img_show = Image.new('RGB', (w * 2, h), (255, 255, 255)) img_show.paste(img_left, (0, 0, w, h)) img_show.paste(img_right, (w, 0, w * 2, h)) return np.array(img_show) def str_count(s): """ Count the number of Chinese characters, a single English character and a single number equal to half the length of Chinese characters. 
args: s(string): the input of string return(int): the number of Chinese characters """ import string count_zh = count_pu = 0 s_len = len(s) en_dg_count = 0 for c in s: if c in string.ascii_letters or c.isdigit() or c.isspace(): en_dg_count += 1 elif c.isalpha(): count_zh += 1 else: count_pu += 1 return s_len - math.ceil(en_dg_count / 2) def text_visual(texts, scores, img_h=400, img_w=600, threshold=0., font_path="./doc/simfang.ttf"): """ create new blank img and draw txt on it args: texts(list): the text will be draw scores(list|None): corresponding score of each txt img_h(int): the height of blank img img_w(int): the width of blank img font_path: the path of font which is used to draw text return(array): """ if scores is not None: assert len(texts) == len( scores), "The number of txts and corresponding scores must match" def create_blank_img(): blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255 blank_img[:, img_w - 1:] = 0 blank_img = Image.fromarray(blank_img).convert("RGB") draw_txt = ImageDraw.Draw(blank_img) return blank_img, draw_txt blank_img, draw_txt = create_blank_img() font_size = 20 txt_color = (0, 0, 0) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") gap = font_size + 5 txt_img_list = [] count, index = 1, 0 for idx, txt in enumerate(texts): index += 1 if scores[idx] < threshold or math.isnan(scores[idx]): index -= 1 continue first_line = True while str_count(txt) >= img_w // font_size - 4: tmp = txt txt = tmp[:img_w // font_size - 4] if first_line: new_txt = str(index) + ': ' + txt first_line = False else: new_txt = ' ' + txt draw_txt.text((0, gap * count), new_txt, txt_color, font=font) txt = tmp[img_w // font_size - 4:] if count >= img_h // gap - 1: txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 if first_line: new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx]) else: new_txt = " " + txt + " " + '%.3f' % (scores[idx]) draw_txt.text((0, gap * count), new_txt, txt_color, font=font) # whether add new blank img or not if count >= img_h // gap - 1 and idx + 1 < len(texts): txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 txt_img_list.append(np.array(blank_img)) if len(txt_img_list) == 1: blank_img = np.array(txt_img_list[0]) else: blank_img = np.concatenate(txt_img_list, axis=1) return np.array(blank_img) def base64_to_cv2(b64str): import base64 data = base64.b64decode(b64str.encode('utf8')) data = np.fromstring(data, np.uint8) data = cv2.imdecode(data, cv2.IMREAD_COLOR) return data def draw_boxes(image, boxes, scores=None, drop_score=0.5): if scores is None: scores = [1] * len(boxes) for (box, score) in zip(boxes, scores): if score < drop_score: continue box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) return image if __name__ == '__main__': test_img = "./doc/test_v2" predict_txt = "./doc/predict.txt" f = open(predict_txt, 'r') data = f.readlines() img_path, anno = data[0].strip().split('\t') img_name = os.path.basename(img_path) img_path = os.path.join(test_img, img_name) image = Image.open(img_path) data = json.loads(anno) boxes, txts, scores = [], [], [] for dic in data: boxes.append(dic['points']) txts.append(dic['transcription']) scores.append(round(dic['scores'], 3)) new_img = draw_ocr(image, boxes, txts, scores) cv2.imwrite(img_name, new_img)
def draw_ocr(image, boxes, txts=None, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): """ Visualize the results of OCR detection and recognition args: image(Image|array): RGB image boxes(list): boxes with shape(N, 4, 2) txts(list): the texts scores(list): txxs corresponding scores drop_score(float): only scores greater than drop_threshold will be visualized font_path: the path of font which is used to draw text return(array): the visualized img """ if scores is None: scores = [1] * len(boxes) box_num = len(boxes) for i in range(box_num): if scores is not None and (scores[i] < drop_score or math.isnan(scores[i])): continue box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) if txts is not None: img = np.array(resize_img(image, input_size=600)) txt_img = text_visual( txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score, font_path=font_path) img = np.concatenate([np.array(img), np.array(txt_img)], axis=1) return img return image
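A hedged usage sketch for the `draw_ocr` implementation above (not part of the dataset row): the image, box coordinates, text, and score are made-up values, and the font path is assumed to point at the `simfang.ttf` file shipped with the repo; `draw_ocr` and `text_visual` are taken from the module shown here.

import numpy as np
from PIL import Image

image = Image.fromarray(np.full((100, 200, 3), 255, dtype=np.uint8))  # blank white RGB image
boxes = [[[10, 10], [150, 10], [150, 40], [10, 40]]]                  # one quadrilateral box
txts = ["hello"]
scores = [0.93]
vis = draw_ocr(image, boxes, txts, scores, drop_score=0.5,
               font_path="./doc/fonts/simfang.ttf")
# `vis` is a numpy array: the resized input with the box drawn, concatenated
# side by side with the text panel produced by `text_visual`.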
207
245
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import sys import cv2 import numpy as np import json from PIL import Image, ImageDraw, ImageFont import math from paddle import inference def parse_args(): def str2bool(v): return v.lower() in ("true", "t", "1") parser = argparse.ArgumentParser() # params for prediction engine parser.add_argument("--use_gpu", type=str2bool, default=True) parser.add_argument("--ir_optim", type=str2bool, default=True) parser.add_argument("--use_tensorrt", type=str2bool, default=False) parser.add_argument("--use_fp16", type=str2bool, default=False) parser.add_argument("--gpu_mem", type=int, default=500) # params for text detector parser.add_argument("--image_dir", type=str) parser.add_argument("--det_algorithm", type=str, default='DB') parser.add_argument("--det_model_dir", type=str) parser.add_argument("--det_limit_side_len", type=float, default=960) parser.add_argument("--det_limit_type", type=str, default='max') # DB parmas parser.add_argument("--det_db_thresh", type=float, default=0.3) parser.add_argument("--det_db_box_thresh", type=float, default=0.6) parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5) parser.add_argument("--max_batch_size", type=int, default=10) parser.add_argument("--use_dilation", type=bool, default=False) parser.add_argument("--det_db_score_mode", type=str, default="fast") # EAST parmas parser.add_argument("--det_east_score_thresh", type=float, default=0.8) parser.add_argument("--det_east_cover_thresh", type=float, default=0.1) parser.add_argument("--det_east_nms_thresh", type=float, default=0.2) # SAST parmas parser.add_argument("--det_sast_score_thresh", type=float, default=0.5) parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2) parser.add_argument("--det_sast_polygon", type=bool, default=False) # params for text recognizer parser.add_argument("--rec_algorithm", type=str, default='CRNN') parser.add_argument("--rec_model_dir", type=str) parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320") parser.add_argument("--rec_char_type", type=str, default='ch') parser.add_argument("--rec_batch_num", type=int, default=6) parser.add_argument("--max_text_length", type=int, default=25) parser.add_argument( "--rec_char_dict_path", type=str, default="./ppocr/utils/ppocr_keys_v1.txt") parser.add_argument("--use_space_char", type=str2bool, default=True) parser.add_argument( "--vis_font_path", type=str, default="./doc/fonts/simfang.ttf") parser.add_argument("--drop_score", type=float, default=0.5) # params for e2e parser.add_argument("--e2e_algorithm", type=str, default='PGNet') parser.add_argument("--e2e_model_dir", type=str) parser.add_argument("--e2e_limit_side_len", type=float, default=768) parser.add_argument("--e2e_limit_type", type=str, default='max') # PGNet parmas parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5) parser.add_argument( "--e2e_char_dict_path", type=str, 
default="./ppocr/utils/ic15_dict.txt") parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext') parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True) parser.add_argument("--e2e_pgnet_mode", type=str, default='fast') # params for text classifier parser.add_argument("--use_angle_cls", type=str2bool, default=False) parser.add_argument("--cls_model_dir", type=str) parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192") parser.add_argument("--label_list", type=list, default=['0', '180']) parser.add_argument("--cls_batch_num", type=int, default=6) parser.add_argument("--cls_thresh", type=float, default=0.9) parser.add_argument("--enable_mkldnn", type=str2bool, default=False) parser.add_argument("--cpu_threads", type=int, default=10) parser.add_argument("--use_pdserving", type=str2bool, default=False) parser.add_argument("--use_mp", type=str2bool, default=False) parser.add_argument("--total_process_num", type=int, default=1) parser.add_argument("--process_id", type=int, default=0) return parser.parse_args() def create_predictor(args, mode, logger): if mode == "det": model_dir = args.det_model_dir elif mode == 'cls': model_dir = args.cls_model_dir elif mode == 'rec': model_dir = args.rec_model_dir else: model_dir = args.e2e_model_dir if model_dir is None: logger.info("not find {} model file path {}".format(mode, model_dir)) sys.exit(0) model_file_path = model_dir + "/inference.pdmodel" params_file_path = model_dir + "/inference.pdiparams" if not os.path.exists(model_file_path): logger.info("not find model file path {}".format(model_file_path)) sys.exit(0) if not os.path.exists(params_file_path): logger.info("not find params file path {}".format(params_file_path)) sys.exit(0) config = inference.Config(model_file_path, params_file_path) if args.use_gpu: config.enable_use_gpu(args.gpu_mem, 0) if args.use_tensorrt: config.enable_tensorrt_engine( precision_mode=inference.PrecisionType.Half if args.use_fp16 else inference.PrecisionType.Float32, max_batch_size=args.max_batch_size) else: config.disable_gpu() cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10 config.set_cpu_math_library_num_threads(cpu_threads) if args.enable_mkldnn: # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() # enable memory optim config.enable_memory_optim() config.disable_glog_info() config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") config.switch_use_feed_fetch_ops(False) # create predictor predictor = inference.create_predictor(config) input_names = predictor.get_input_names() for name in input_names: input_tensor = predictor.get_input_handle(name) output_names = predictor.get_output_names() output_tensors = [] for output_name in output_names: output_tensor = predictor.get_output_handle(output_name) output_tensors.append(output_tensor) return predictor, input_tensor, output_tensors def draw_e2e_res(dt_boxes, strs, img_path): src_im = cv2.imread(img_path) for box, str in zip(dt_boxes, strs): box = box.astype(np.int32).reshape((-1, 1, 2)) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) cv2.putText( src_im, str, org=(int(box[0, 0, 0]), int(box[0, 0, 1])), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.7, color=(0, 255, 0), thickness=1) return src_im def draw_text_det_res(dt_boxes, img_path): src_im = cv2.imread(img_path) for box in dt_boxes: box = np.array(box).astype(np.int32).reshape(-1, 2) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) 
return src_im def resize_img(img, input_size=600): """ resize img and limit the longest side of the image to input_size """ img = np.array(img) im_shape = img.shape im_size_max = np.max(im_shape[0:2]) im_scale = float(input_size) / float(im_size_max) img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale) return img def draw_ocr(image, boxes, txts=None, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): """ Visualize the results of OCR detection and recognition args: image(Image|array): RGB image boxes(list): boxes with shape(N, 4, 2) txts(list): the texts scores(list): txxs corresponding scores drop_score(float): only scores greater than drop_threshold will be visualized font_path: the path of font which is used to draw text return(array): the visualized img """ if scores is None: scores = [1] * len(boxes) box_num = len(boxes) for i in range(box_num): if scores is not None and (scores[i] < drop_score or math.isnan(scores[i])): continue box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) if txts is not None: img = np.array(resize_img(image, input_size=600)) txt_img = text_visual( txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score, font_path=font_path) img = np.concatenate([np.array(img), np.array(txt_img)], axis=1) return img return image def draw_ocr_box_txt(image, boxes, txts, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): h, w = image.height, image.width img_left = image.copy() img_right = Image.new('RGB', (w, h), (255, 255, 255)) import random random.seed(0) draw_left = ImageDraw.Draw(img_left) draw_right = ImageDraw.Draw(img_right) for idx, (box, txt) in enumerate(zip(boxes, txts)): if scores is not None and scores[idx] < drop_score: continue color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw_left.polygon(box, fill=color) draw_right.polygon( [ box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1] ], outline=color) box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][ 1])**2) box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][ 1])**2) if box_height > 2 * box_width: font_size = max(int(box_width * 0.9), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") cur_y = box[0][1] for c in txt: char_size = font.getsize(c) draw_right.text( (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font) cur_y += char_size[1] else: font_size = max(int(box_height * 0.8), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") draw_right.text( [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font) img_left = Image.blend(image, img_left, 0.5) img_show = Image.new('RGB', (w * 2, h), (255, 255, 255)) img_show.paste(img_left, (0, 0, w, h)) img_show.paste(img_right, (w, 0, w * 2, h)) return np.array(img_show) def str_count(s): """ Count the number of Chinese characters, a single English character and a single number equal to half the length of Chinese characters. 
args: s(string): the input of string return(int): the number of Chinese characters """ import string count_zh = count_pu = 0 s_len = len(s) en_dg_count = 0 for c in s: if c in string.ascii_letters or c.isdigit() or c.isspace(): en_dg_count += 1 elif c.isalpha(): count_zh += 1 else: count_pu += 1 return s_len - math.ceil(en_dg_count / 2) def text_visual(texts, scores, img_h=400, img_w=600, threshold=0., font_path="./doc/simfang.ttf"): """ create new blank img and draw txt on it args: texts(list): the text will be draw scores(list|None): corresponding score of each txt img_h(int): the height of blank img img_w(int): the width of blank img font_path: the path of font which is used to draw text return(array): """ if scores is not None: assert len(texts) == len( scores), "The number of txts and corresponding scores must match" def create_blank_img(): blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255 blank_img[:, img_w - 1:] = 0 blank_img = Image.fromarray(blank_img).convert("RGB") draw_txt = ImageDraw.Draw(blank_img) return blank_img, draw_txt blank_img, draw_txt = create_blank_img() font_size = 20 txt_color = (0, 0, 0) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") gap = font_size + 5 txt_img_list = [] count, index = 1, 0 for idx, txt in enumerate(texts): index += 1 if scores[idx] < threshold or math.isnan(scores[idx]): index -= 1 continue first_line = True while str_count(txt) >= img_w // font_size - 4: tmp = txt txt = tmp[:img_w // font_size - 4] if first_line: new_txt = str(index) + ': ' + txt first_line = False else: new_txt = ' ' + txt draw_txt.text((0, gap * count), new_txt, txt_color, font=font) txt = tmp[img_w // font_size - 4:] if count >= img_h // gap - 1: txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 if first_line: new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx]) else: new_txt = " " + txt + " " + '%.3f' % (scores[idx]) draw_txt.text((0, gap * count), new_txt, txt_color, font=font) # whether add new blank img or not if count >= img_h // gap - 1 and idx + 1 < len(texts): txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 txt_img_list.append(np.array(blank_img)) if len(txt_img_list) == 1: blank_img = np.array(txt_img_list[0]) else: blank_img = np.concatenate(txt_img_list, axis=1) return np.array(blank_img) def base64_to_cv2(b64str): import base64 data = base64.b64decode(b64str.encode('utf8')) data = np.fromstring(data, np.uint8) data = cv2.imdecode(data, cv2.IMREAD_COLOR) return data def draw_boxes(image, boxes, scores=None, drop_score=0.5): if scores is None: scores = [1] * len(boxes) for (box, score) in zip(boxes, scores): if score < drop_score: continue box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) return image if __name__ == '__main__': test_img = "./doc/test_v2" predict_txt = "./doc/predict.txt" f = open(predict_txt, 'r') data = f.readlines() img_path, anno = data[0].strip().split('\t') img_name = os.path.basename(img_path) img_path = os.path.join(test_img, img_name) image = Image.open(img_path) data = json.loads(anno) boxes, txts, scores = [], [], [] for dic in data: boxes.append(dic['points']) txts.append(dic['transcription']) scores.append(round(dic['scores'], 3)) new_img = draw_ocr(image, boxes, txts, scores) cv2.imwrite(img_name, new_img)
str_count
Count the effective length of a string: each Chinese character counts as one, while a single English character or digit counts as half a Chinese character. args: s(string): the input string return(int): the effective number of characters
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import sys import cv2 import numpy as np import json from PIL import Image, ImageDraw, ImageFont import math from paddle import inference def parse_args(): def str2bool(v): return v.lower() in ("true", "t", "1") parser = argparse.ArgumentParser() # params for prediction engine parser.add_argument("--use_gpu", type=str2bool, default=True) parser.add_argument("--ir_optim", type=str2bool, default=True) parser.add_argument("--use_tensorrt", type=str2bool, default=False) parser.add_argument("--use_fp16", type=str2bool, default=False) parser.add_argument("--gpu_mem", type=int, default=500) # params for text detector parser.add_argument("--image_dir", type=str) parser.add_argument("--det_algorithm", type=str, default='DB') parser.add_argument("--det_model_dir", type=str) parser.add_argument("--det_limit_side_len", type=float, default=960) parser.add_argument("--det_limit_type", type=str, default='max') # DB parmas parser.add_argument("--det_db_thresh", type=float, default=0.3) parser.add_argument("--det_db_box_thresh", type=float, default=0.6) parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5) parser.add_argument("--max_batch_size", type=int, default=10) parser.add_argument("--use_dilation", type=bool, default=False) parser.add_argument("--det_db_score_mode", type=str, default="fast") # EAST parmas parser.add_argument("--det_east_score_thresh", type=float, default=0.8) parser.add_argument("--det_east_cover_thresh", type=float, default=0.1) parser.add_argument("--det_east_nms_thresh", type=float, default=0.2) # SAST parmas parser.add_argument("--det_sast_score_thresh", type=float, default=0.5) parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2) parser.add_argument("--det_sast_polygon", type=bool, default=False) # params for text recognizer parser.add_argument("--rec_algorithm", type=str, default='CRNN') parser.add_argument("--rec_model_dir", type=str) parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320") parser.add_argument("--rec_char_type", type=str, default='ch') parser.add_argument("--rec_batch_num", type=int, default=6) parser.add_argument("--max_text_length", type=int, default=25) parser.add_argument( "--rec_char_dict_path", type=str, default="./ppocr/utils/ppocr_keys_v1.txt") parser.add_argument("--use_space_char", type=str2bool, default=True) parser.add_argument( "--vis_font_path", type=str, default="./doc/fonts/simfang.ttf") parser.add_argument("--drop_score", type=float, default=0.5) # params for e2e parser.add_argument("--e2e_algorithm", type=str, default='PGNet') parser.add_argument("--e2e_model_dir", type=str) parser.add_argument("--e2e_limit_side_len", type=float, default=768) parser.add_argument("--e2e_limit_type", type=str, default='max') # PGNet parmas parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5) parser.add_argument( "--e2e_char_dict_path", type=str, 
default="./ppocr/utils/ic15_dict.txt") parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext') parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True) parser.add_argument("--e2e_pgnet_mode", type=str, default='fast') # params for text classifier parser.add_argument("--use_angle_cls", type=str2bool, default=False) parser.add_argument("--cls_model_dir", type=str) parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192") parser.add_argument("--label_list", type=list, default=['0', '180']) parser.add_argument("--cls_batch_num", type=int, default=6) parser.add_argument("--cls_thresh", type=float, default=0.9) parser.add_argument("--enable_mkldnn", type=str2bool, default=False) parser.add_argument("--cpu_threads", type=int, default=10) parser.add_argument("--use_pdserving", type=str2bool, default=False) parser.add_argument("--use_mp", type=str2bool, default=False) parser.add_argument("--total_process_num", type=int, default=1) parser.add_argument("--process_id", type=int, default=0) return parser.parse_args() def create_predictor(args, mode, logger): if mode == "det": model_dir = args.det_model_dir elif mode == 'cls': model_dir = args.cls_model_dir elif mode == 'rec': model_dir = args.rec_model_dir else: model_dir = args.e2e_model_dir if model_dir is None: logger.info("not find {} model file path {}".format(mode, model_dir)) sys.exit(0) model_file_path = model_dir + "/inference.pdmodel" params_file_path = model_dir + "/inference.pdiparams" if not os.path.exists(model_file_path): logger.info("not find model file path {}".format(model_file_path)) sys.exit(0) if not os.path.exists(params_file_path): logger.info("not find params file path {}".format(params_file_path)) sys.exit(0) config = inference.Config(model_file_path, params_file_path) if args.use_gpu: config.enable_use_gpu(args.gpu_mem, 0) if args.use_tensorrt: config.enable_tensorrt_engine( precision_mode=inference.PrecisionType.Half if args.use_fp16 else inference.PrecisionType.Float32, max_batch_size=args.max_batch_size) else: config.disable_gpu() cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10 config.set_cpu_math_library_num_threads(cpu_threads) if args.enable_mkldnn: # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() # enable memory optim config.enable_memory_optim() config.disable_glog_info() config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") config.switch_use_feed_fetch_ops(False) # create predictor predictor = inference.create_predictor(config) input_names = predictor.get_input_names() for name in input_names: input_tensor = predictor.get_input_handle(name) output_names = predictor.get_output_names() output_tensors = [] for output_name in output_names: output_tensor = predictor.get_output_handle(output_name) output_tensors.append(output_tensor) return predictor, input_tensor, output_tensors def draw_e2e_res(dt_boxes, strs, img_path): src_im = cv2.imread(img_path) for box, str in zip(dt_boxes, strs): box = box.astype(np.int32).reshape((-1, 1, 2)) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) cv2.putText( src_im, str, org=(int(box[0, 0, 0]), int(box[0, 0, 1])), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.7, color=(0, 255, 0), thickness=1) return src_im def draw_text_det_res(dt_boxes, img_path): src_im = cv2.imread(img_path) for box in dt_boxes: box = np.array(box).astype(np.int32).reshape(-1, 2) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) 
return src_im def resize_img(img, input_size=600): """ resize img and limit the longest side of the image to input_size """ img = np.array(img) im_shape = img.shape im_size_max = np.max(im_shape[0:2]) im_scale = float(input_size) / float(im_size_max) img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale) return img def draw_ocr(image, boxes, txts=None, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): """ Visualize the results of OCR detection and recognition args: image(Image|array): RGB image boxes(list): boxes with shape(N, 4, 2) txts(list): the texts scores(list): txxs corresponding scores drop_score(float): only scores greater than drop_threshold will be visualized font_path: the path of font which is used to draw text return(array): the visualized img """ if scores is None: scores = [1] * len(boxes) box_num = len(boxes) for i in range(box_num): if scores is not None and (scores[i] < drop_score or math.isnan(scores[i])): continue box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) if txts is not None: img = np.array(resize_img(image, input_size=600)) txt_img = text_visual( txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score, font_path=font_path) img = np.concatenate([np.array(img), np.array(txt_img)], axis=1) return img return image def draw_ocr_box_txt(image, boxes, txts, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): h, w = image.height, image.width img_left = image.copy() img_right = Image.new('RGB', (w, h), (255, 255, 255)) import random random.seed(0) draw_left = ImageDraw.Draw(img_left) draw_right = ImageDraw.Draw(img_right) for idx, (box, txt) in enumerate(zip(boxes, txts)): if scores is not None and scores[idx] < drop_score: continue color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw_left.polygon(box, fill=color) draw_right.polygon( [ box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1] ], outline=color) box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][ 1])**2) box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][ 1])**2) if box_height > 2 * box_width: font_size = max(int(box_width * 0.9), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") cur_y = box[0][1] for c in txt: char_size = font.getsize(c) draw_right.text( (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font) cur_y += char_size[1] else: font_size = max(int(box_height * 0.8), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") draw_right.text( [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font) img_left = Image.blend(image, img_left, 0.5) img_show = Image.new('RGB', (w * 2, h), (255, 255, 255)) img_show.paste(img_left, (0, 0, w, h)) img_show.paste(img_right, (w, 0, w * 2, h)) return np.array(img_show) # MASKED: str_count function (lines 300-321) def text_visual(texts, scores, img_h=400, img_w=600, threshold=0., font_path="./doc/simfang.ttf"): """ create new blank img and draw txt on it args: texts(list): the text will be draw scores(list|None): corresponding score of each txt img_h(int): the height of blank img img_w(int): the width of blank img font_path: the path of font which is used to draw text return(array): """ if scores is not None: assert len(texts) == len( scores), "The number of txts and corresponding scores must match" def create_blank_img(): blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255 blank_img[:, img_w - 1:] = 0 
blank_img = Image.fromarray(blank_img).convert("RGB") draw_txt = ImageDraw.Draw(blank_img) return blank_img, draw_txt blank_img, draw_txt = create_blank_img() font_size = 20 txt_color = (0, 0, 0) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") gap = font_size + 5 txt_img_list = [] count, index = 1, 0 for idx, txt in enumerate(texts): index += 1 if scores[idx] < threshold or math.isnan(scores[idx]): index -= 1 continue first_line = True while str_count(txt) >= img_w // font_size - 4: tmp = txt txt = tmp[:img_w // font_size - 4] if first_line: new_txt = str(index) + ': ' + txt first_line = False else: new_txt = ' ' + txt draw_txt.text((0, gap * count), new_txt, txt_color, font=font) txt = tmp[img_w // font_size - 4:] if count >= img_h // gap - 1: txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 if first_line: new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx]) else: new_txt = " " + txt + " " + '%.3f' % (scores[idx]) draw_txt.text((0, gap * count), new_txt, txt_color, font=font) # whether add new blank img or not if count >= img_h // gap - 1 and idx + 1 < len(texts): txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 txt_img_list.append(np.array(blank_img)) if len(txt_img_list) == 1: blank_img = np.array(txt_img_list[0]) else: blank_img = np.concatenate(txt_img_list, axis=1) return np.array(blank_img) def base64_to_cv2(b64str): import base64 data = base64.b64decode(b64str.encode('utf8')) data = np.fromstring(data, np.uint8) data = cv2.imdecode(data, cv2.IMREAD_COLOR) return data def draw_boxes(image, boxes, scores=None, drop_score=0.5): if scores is None: scores = [1] * len(boxes) for (box, score) in zip(boxes, scores): if score < drop_score: continue box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) return image if __name__ == '__main__': test_img = "./doc/test_v2" predict_txt = "./doc/predict.txt" f = open(predict_txt, 'r') data = f.readlines() img_path, anno = data[0].strip().split('\t') img_name = os.path.basename(img_path) img_path = os.path.join(test_img, img_name) image = Image.open(img_path) data = json.loads(anno) boxes, txts, scores = [], [], [] for dic in data: boxes.append(dic['points']) txts.append(dic['transcription']) scores.append(round(dic['scores'], 3)) new_img = draw_ocr(image, boxes, txts, scores) cv2.imwrite(img_name, new_img)
def str_count(s): """ Count the number of Chinese characters, a single English character and a single number equal to half the length of Chinese characters. args: s(string): the input of string return(int): the number of Chinese characters """ import string count_zh = count_pu = 0 s_len = len(s) en_dg_count = 0 for c in s: if c in string.ascii_letters or c.isdigit() or c.isspace(): en_dg_count += 1 elif c.isalpha(): count_zh += 1 else: count_pu += 1 return s_len - math.ceil(en_dg_count / 2)
300
321
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import sys import cv2 import numpy as np import json from PIL import Image, ImageDraw, ImageFont import math from paddle import inference def parse_args(): def str2bool(v): return v.lower() in ("true", "t", "1") parser = argparse.ArgumentParser() # params for prediction engine parser.add_argument("--use_gpu", type=str2bool, default=True) parser.add_argument("--ir_optim", type=str2bool, default=True) parser.add_argument("--use_tensorrt", type=str2bool, default=False) parser.add_argument("--use_fp16", type=str2bool, default=False) parser.add_argument("--gpu_mem", type=int, default=500) # params for text detector parser.add_argument("--image_dir", type=str) parser.add_argument("--det_algorithm", type=str, default='DB') parser.add_argument("--det_model_dir", type=str) parser.add_argument("--det_limit_side_len", type=float, default=960) parser.add_argument("--det_limit_type", type=str, default='max') # DB parmas parser.add_argument("--det_db_thresh", type=float, default=0.3) parser.add_argument("--det_db_box_thresh", type=float, default=0.6) parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5) parser.add_argument("--max_batch_size", type=int, default=10) parser.add_argument("--use_dilation", type=bool, default=False) parser.add_argument("--det_db_score_mode", type=str, default="fast") # EAST parmas parser.add_argument("--det_east_score_thresh", type=float, default=0.8) parser.add_argument("--det_east_cover_thresh", type=float, default=0.1) parser.add_argument("--det_east_nms_thresh", type=float, default=0.2) # SAST parmas parser.add_argument("--det_sast_score_thresh", type=float, default=0.5) parser.add_argument("--det_sast_nms_thresh", type=float, default=0.2) parser.add_argument("--det_sast_polygon", type=bool, default=False) # params for text recognizer parser.add_argument("--rec_algorithm", type=str, default='CRNN') parser.add_argument("--rec_model_dir", type=str) parser.add_argument("--rec_image_shape", type=str, default="3, 32, 320") parser.add_argument("--rec_char_type", type=str, default='ch') parser.add_argument("--rec_batch_num", type=int, default=6) parser.add_argument("--max_text_length", type=int, default=25) parser.add_argument( "--rec_char_dict_path", type=str, default="./ppocr/utils/ppocr_keys_v1.txt") parser.add_argument("--use_space_char", type=str2bool, default=True) parser.add_argument( "--vis_font_path", type=str, default="./doc/fonts/simfang.ttf") parser.add_argument("--drop_score", type=float, default=0.5) # params for e2e parser.add_argument("--e2e_algorithm", type=str, default='PGNet') parser.add_argument("--e2e_model_dir", type=str) parser.add_argument("--e2e_limit_side_len", type=float, default=768) parser.add_argument("--e2e_limit_type", type=str, default='max') # PGNet parmas parser.add_argument("--e2e_pgnet_score_thresh", type=float, default=0.5) parser.add_argument( "--e2e_char_dict_path", type=str, 
default="./ppocr/utils/ic15_dict.txt") parser.add_argument("--e2e_pgnet_valid_set", type=str, default='totaltext') parser.add_argument("--e2e_pgnet_polygon", type=bool, default=True) parser.add_argument("--e2e_pgnet_mode", type=str, default='fast') # params for text classifier parser.add_argument("--use_angle_cls", type=str2bool, default=False) parser.add_argument("--cls_model_dir", type=str) parser.add_argument("--cls_image_shape", type=str, default="3, 48, 192") parser.add_argument("--label_list", type=list, default=['0', '180']) parser.add_argument("--cls_batch_num", type=int, default=6) parser.add_argument("--cls_thresh", type=float, default=0.9) parser.add_argument("--enable_mkldnn", type=str2bool, default=False) parser.add_argument("--cpu_threads", type=int, default=10) parser.add_argument("--use_pdserving", type=str2bool, default=False) parser.add_argument("--use_mp", type=str2bool, default=False) parser.add_argument("--total_process_num", type=int, default=1) parser.add_argument("--process_id", type=int, default=0) return parser.parse_args() def create_predictor(args, mode, logger): if mode == "det": model_dir = args.det_model_dir elif mode == 'cls': model_dir = args.cls_model_dir elif mode == 'rec': model_dir = args.rec_model_dir else: model_dir = args.e2e_model_dir if model_dir is None: logger.info("not find {} model file path {}".format(mode, model_dir)) sys.exit(0) model_file_path = model_dir + "/inference.pdmodel" params_file_path = model_dir + "/inference.pdiparams" if not os.path.exists(model_file_path): logger.info("not find model file path {}".format(model_file_path)) sys.exit(0) if not os.path.exists(params_file_path): logger.info("not find params file path {}".format(params_file_path)) sys.exit(0) config = inference.Config(model_file_path, params_file_path) if args.use_gpu: config.enable_use_gpu(args.gpu_mem, 0) if args.use_tensorrt: config.enable_tensorrt_engine( precision_mode=inference.PrecisionType.Half if args.use_fp16 else inference.PrecisionType.Float32, max_batch_size=args.max_batch_size) else: config.disable_gpu() cpu_threads = args.cpu_threads if hasattr(args, "cpu_threads") else 10 config.set_cpu_math_library_num_threads(cpu_threads) if args.enable_mkldnn: # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() # enable memory optim config.enable_memory_optim() config.disable_glog_info() config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass") config.switch_use_feed_fetch_ops(False) # create predictor predictor = inference.create_predictor(config) input_names = predictor.get_input_names() for name in input_names: input_tensor = predictor.get_input_handle(name) output_names = predictor.get_output_names() output_tensors = [] for output_name in output_names: output_tensor = predictor.get_output_handle(output_name) output_tensors.append(output_tensor) return predictor, input_tensor, output_tensors def draw_e2e_res(dt_boxes, strs, img_path): src_im = cv2.imread(img_path) for box, str in zip(dt_boxes, strs): box = box.astype(np.int32).reshape((-1, 1, 2)) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) cv2.putText( src_im, str, org=(int(box[0, 0, 0]), int(box[0, 0, 1])), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.7, color=(0, 255, 0), thickness=1) return src_im def draw_text_det_res(dt_boxes, img_path): src_im = cv2.imread(img_path) for box in dt_boxes: box = np.array(box).astype(np.int32).reshape(-1, 2) cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2) 
return src_im def resize_img(img, input_size=600): """ resize img and limit the longest side of the image to input_size """ img = np.array(img) im_shape = img.shape im_size_max = np.max(im_shape[0:2]) im_scale = float(input_size) / float(im_size_max) img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale) return img def draw_ocr(image, boxes, txts=None, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): """ Visualize the results of OCR detection and recognition args: image(Image|array): RGB image boxes(list): boxes with shape(N, 4, 2) txts(list): the texts scores(list): txxs corresponding scores drop_score(float): only scores greater than drop_threshold will be visualized font_path: the path of font which is used to draw text return(array): the visualized img """ if scores is None: scores = [1] * len(boxes) box_num = len(boxes) for i in range(box_num): if scores is not None and (scores[i] < drop_score or math.isnan(scores[i])): continue box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) if txts is not None: img = np.array(resize_img(image, input_size=600)) txt_img = text_visual( txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score, font_path=font_path) img = np.concatenate([np.array(img), np.array(txt_img)], axis=1) return img return image def draw_ocr_box_txt(image, boxes, txts, scores=None, drop_score=0.5, font_path="./doc/simfang.ttf"): h, w = image.height, image.width img_left = image.copy() img_right = Image.new('RGB', (w, h), (255, 255, 255)) import random random.seed(0) draw_left = ImageDraw.Draw(img_left) draw_right = ImageDraw.Draw(img_right) for idx, (box, txt) in enumerate(zip(boxes, txts)): if scores is not None and scores[idx] < drop_score: continue color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) draw_left.polygon(box, fill=color) draw_right.polygon( [ box[0][0], box[0][1], box[1][0], box[1][1], box[2][0], box[2][1], box[3][0], box[3][1] ], outline=color) box_height = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][ 1])**2) box_width = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][ 1])**2) if box_height > 2 * box_width: font_size = max(int(box_width * 0.9), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") cur_y = box[0][1] for c in txt: char_size = font.getsize(c) draw_right.text( (box[0][0] + 3, cur_y), c, fill=(0, 0, 0), font=font) cur_y += char_size[1] else: font_size = max(int(box_height * 0.8), 10) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") draw_right.text( [box[0][0], box[0][1]], txt, fill=(0, 0, 0), font=font) img_left = Image.blend(image, img_left, 0.5) img_show = Image.new('RGB', (w * 2, h), (255, 255, 255)) img_show.paste(img_left, (0, 0, w, h)) img_show.paste(img_right, (w, 0, w * 2, h)) return np.array(img_show) def str_count(s): """ Count the number of Chinese characters, a single English character and a single number equal to half the length of Chinese characters. 
args: s(string): the input of string return(int): the number of Chinese characters """ import string count_zh = count_pu = 0 s_len = len(s) en_dg_count = 0 for c in s: if c in string.ascii_letters or c.isdigit() or c.isspace(): en_dg_count += 1 elif c.isalpha(): count_zh += 1 else: count_pu += 1 return s_len - math.ceil(en_dg_count / 2) def text_visual(texts, scores, img_h=400, img_w=600, threshold=0., font_path="./doc/simfang.ttf"): """ create new blank img and draw txt on it args: texts(list): the text will be draw scores(list|None): corresponding score of each txt img_h(int): the height of blank img img_w(int): the width of blank img font_path: the path of font which is used to draw text return(array): """ if scores is not None: assert len(texts) == len( scores), "The number of txts and corresponding scores must match" def create_blank_img(): blank_img = np.ones(shape=[img_h, img_w], dtype=np.int8) * 255 blank_img[:, img_w - 1:] = 0 blank_img = Image.fromarray(blank_img).convert("RGB") draw_txt = ImageDraw.Draw(blank_img) return blank_img, draw_txt blank_img, draw_txt = create_blank_img() font_size = 20 txt_color = (0, 0, 0) font = ImageFont.truetype(font_path, font_size, encoding="utf-8") gap = font_size + 5 txt_img_list = [] count, index = 1, 0 for idx, txt in enumerate(texts): index += 1 if scores[idx] < threshold or math.isnan(scores[idx]): index -= 1 continue first_line = True while str_count(txt) >= img_w // font_size - 4: tmp = txt txt = tmp[:img_w // font_size - 4] if first_line: new_txt = str(index) + ': ' + txt first_line = False else: new_txt = ' ' + txt draw_txt.text((0, gap * count), new_txt, txt_color, font=font) txt = tmp[img_w // font_size - 4:] if count >= img_h // gap - 1: txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 if first_line: new_txt = str(index) + ': ' + txt + ' ' + '%.3f' % (scores[idx]) else: new_txt = " " + txt + " " + '%.3f' % (scores[idx]) draw_txt.text((0, gap * count), new_txt, txt_color, font=font) # whether add new blank img or not if count >= img_h // gap - 1 and idx + 1 < len(texts): txt_img_list.append(np.array(blank_img)) blank_img, draw_txt = create_blank_img() count = 0 count += 1 txt_img_list.append(np.array(blank_img)) if len(txt_img_list) == 1: blank_img = np.array(txt_img_list[0]) else: blank_img = np.concatenate(txt_img_list, axis=1) return np.array(blank_img) def base64_to_cv2(b64str): import base64 data = base64.b64decode(b64str.encode('utf8')) data = np.fromstring(data, np.uint8) data = cv2.imdecode(data, cv2.IMREAD_COLOR) return data def draw_boxes(image, boxes, scores=None, drop_score=0.5): if scores is None: scores = [1] * len(boxes) for (box, score) in zip(boxes, scores): if score < drop_score: continue box = np.reshape(np.array(box), [-1, 1, 2]).astype(np.int64) image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2) return image if __name__ == '__main__': test_img = "./doc/test_v2" predict_txt = "./doc/predict.txt" f = open(predict_txt, 'r') data = f.readlines() img_path, anno = data[0].strip().split('\t') img_name = os.path.basename(img_path) img_path = os.path.join(test_img, img_name) image = Image.open(img_path) data = json.loads(anno) boxes, txts, scores = [], [], [] for dic in data: boxes.append(dic['points']) txts.append(dic['transcription']) scores.append(round(dic['scores'], 3)) new_img = draw_ocr(image, boxes, txts, scores) cv2.imwrite(img_name, new_img)
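As a quick illustration of the width rule that str_count encodes for text_visual's line wrapping, here is a standalone sketch: each CJK character contributes a full cell, while every two ASCII letters, digits, or spaces together contribute one cell. The helper name display_width and the sample strings are ours, not part of the utility module above.

import math
import string

def display_width(s):
    # Mirror str_count: ASCII letters, digits and spaces are half-width,
    # everything else (CJK characters, punctuation) is full-width.
    half_width = sum(1 for c in s
                     if c in string.ascii_letters or c.isdigit() or c.isspace())
    return len(s) - math.ceil(half_width / 2)

print(display_width("OCR检测"))  # 5 chars, 3 half-width -> 5 - ceil(3/2) = 3
print(display_width("hello"))    # 5 half-width chars   -> 5 - ceil(5/2) = 2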
get_express_route_gateway
ExpressRoute gateway resource. API Version: 2020-08-01. :param str express_route_gateway_name: The name of the ExpressRoute gateway. :param str resource_group_name: The name of the resource group.
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs __all__ = [ 'GetExpressRouteGatewayResult', 'AwaitableGetExpressRouteGatewayResult', 'get_express_route_gateway', ] @pulumi.output_type class GetExpressRouteGatewayResult: """ ExpressRoute gateway resource. """ def __init__(__self__, auto_scale_configuration=None, etag=None, express_route_connections=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None, virtual_hub=None): if auto_scale_configuration and not isinstance(auto_scale_configuration, dict): raise TypeError("Expected argument 'auto_scale_configuration' to be a dict") pulumi.set(__self__, "auto_scale_configuration", auto_scale_configuration) if etag and not isinstance(etag, str): raise TypeError("Expected argument 'etag' to be a str") pulumi.set(__self__, "etag", etag) if express_route_connections and not isinstance(express_route_connections, list): raise TypeError("Expected argument 'express_route_connections' to be a list") pulumi.set(__self__, "express_route_connections", express_route_connections) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if virtual_hub and not isinstance(virtual_hub, dict): raise TypeError("Expected argument 'virtual_hub' to be a dict") pulumi.set(__self__, "virtual_hub", virtual_hub) @property @pulumi.getter(name="autoScaleConfiguration") def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']: """ Configuration for auto scaling. """ return pulumi.get(self, "auto_scale_configuration") @property @pulumi.getter def etag(self) -> str: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="expressRouteConnections") def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']: """ List of ExpressRoute connections to the ExpressRoute gateway. """ return pulumi.get(self, "express_route_connections") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ Resource name. 
""" return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the express route gateway resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> 'outputs.VirtualHubIdResponse': """ The Virtual Hub where the ExpressRoute gateway is or will be deployed. """ return pulumi.get(self, "virtual_hub") class AwaitableGetExpressRouteGatewayResult(GetExpressRouteGatewayResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetExpressRouteGatewayResult( auto_scale_configuration=self.auto_scale_configuration, etag=self.etag, express_route_connections=self.express_route_connections, id=self.id, location=self.location, name=self.name, provisioning_state=self.provisioning_state, tags=self.tags, type=self.type, virtual_hub=self.virtual_hub) # MASKED: get_express_route_gateway function (lines 154-184)
def get_express_route_gateway(express_route_gateway_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteGatewayResult: """ ExpressRoute gateway resource. API Version: 2020-08-01. :param str express_route_gateway_name: The name of the ExpressRoute gateway. :param str resource_group_name: The name of the resource group. """ __args__ = dict() __args__['expressRouteGatewayName'] = express_route_gateway_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value return AwaitableGetExpressRouteGatewayResult( auto_scale_configuration=__ret__.auto_scale_configuration, etag=__ret__.etag, express_route_connections=__ret__.express_route_connections, id=__ret__.id, location=__ret__.location, name=__ret__.name, provisioning_state=__ret__.provisioning_state, tags=__ret__.tags, type=__ret__.type, virtual_hub=__ret__.virtual_hub)
154
184
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from . import outputs __all__ = [ 'GetExpressRouteGatewayResult', 'AwaitableGetExpressRouteGatewayResult', 'get_express_route_gateway', ] @pulumi.output_type class GetExpressRouteGatewayResult: """ ExpressRoute gateway resource. """ def __init__(__self__, auto_scale_configuration=None, etag=None, express_route_connections=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None, virtual_hub=None): if auto_scale_configuration and not isinstance(auto_scale_configuration, dict): raise TypeError("Expected argument 'auto_scale_configuration' to be a dict") pulumi.set(__self__, "auto_scale_configuration", auto_scale_configuration) if etag and not isinstance(etag, str): raise TypeError("Expected argument 'etag' to be a str") pulumi.set(__self__, "etag", etag) if express_route_connections and not isinstance(express_route_connections, list): raise TypeError("Expected argument 'express_route_connections' to be a list") pulumi.set(__self__, "express_route_connections", express_route_connections) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if location and not isinstance(location, str): raise TypeError("Expected argument 'location' to be a str") pulumi.set(__self__, "location", location) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if tags and not isinstance(tags, dict): raise TypeError("Expected argument 'tags' to be a dict") pulumi.set(__self__, "tags", tags) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) if virtual_hub and not isinstance(virtual_hub, dict): raise TypeError("Expected argument 'virtual_hub' to be a dict") pulumi.set(__self__, "virtual_hub", virtual_hub) @property @pulumi.getter(name="autoScaleConfiguration") def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']: """ Configuration for auto scaling. """ return pulumi.get(self, "auto_scale_configuration") @property @pulumi.getter def etag(self) -> str: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter(name="expressRouteConnections") def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']: """ List of ExpressRoute connections to the ExpressRoute gateway. """ return pulumi.get(self, "express_route_connections") @property @pulumi.getter def id(self) -> Optional[str]: """ Resource ID. """ return pulumi.get(self, "id") @property @pulumi.getter def location(self) -> Optional[str]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> str: """ Resource name. 
""" return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ The provisioning state of the express route gateway resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> str: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> 'outputs.VirtualHubIdResponse': """ The Virtual Hub where the ExpressRoute gateway is or will be deployed. """ return pulumi.get(self, "virtual_hub") class AwaitableGetExpressRouteGatewayResult(GetExpressRouteGatewayResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetExpressRouteGatewayResult( auto_scale_configuration=self.auto_scale_configuration, etag=self.etag, express_route_connections=self.express_route_connections, id=self.id, location=self.location, name=self.name, provisioning_state=self.provisioning_state, tags=self.tags, type=self.type, virtual_hub=self.virtual_hub) def get_express_route_gateway(express_route_gateway_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetExpressRouteGatewayResult: """ ExpressRoute gateway resource. API Version: 2020-08-01. :param str express_route_gateway_name: The name of the ExpressRoute gateway. :param str resource_group_name: The name of the resource group. """ __args__ = dict() __args__['expressRouteGatewayName'] = express_route_gateway_name __args__['resourceGroupName'] = resource_group_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value return AwaitableGetExpressRouteGatewayResult( auto_scale_configuration=__ret__.auto_scale_configuration, etag=__ret__.etag, express_route_connections=__ret__.express_route_connections, id=__ret__.id, location=__ret__.location, name=__ret__.name, provisioning_state=__ret__.provisioning_state, tags=__ret__.tags, type=__ret__.type, virtual_hub=__ret__.virtual_hub)
wait_until_upload_url_changed
Wait until the upload proxy URL has changed to the expected value. Args: uploadproxy_url (str): The expected upload proxy URL. timeout (int): Time to wait for the CDI config to update. Returns: None. The method returns once the current uploadProxyURL equals uploadproxy_url.
# -*- coding: utf-8 -*- import logging from ocp_resources.constants import PROTOCOL_ERROR_EXCEPTION_DICT from ocp_resources.resource import TIMEOUT, Resource from ocp_resources.utils import TimeoutSampler LOGGER = logging.getLogger(__name__) class CDIConfig(Resource): """ CDIConfig object. """ api_group = Resource.ApiGroup.CDI_KUBEVIRT_IO @property def scratch_space_storage_class_from_spec(self): return self.instance.spec.scratchSpaceStorageClass @property def scratch_space_storage_class_from_status(self): return self.instance.status.scratchSpaceStorageClass @property def upload_proxy_url(self): return self.instance.status.uploadProxyURL # MASKED: wait_until_upload_url_changed function (lines 32-57)
def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT): """ Wait until upload proxy url is changed Args: timeout (int): Time to wait for CDI Config. Returns: bool: True if url is equal to uploadProxyURL. """ LOGGER.info( f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL" ) samples = TimeoutSampler( wait_timeout=timeout, sleep=1, exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT, func=self.api.get, field_selector=f"metadata.name=={self.name}", ) for sample in samples: if sample.items: status = sample.items[0].status current_url = status.uploadProxyURL if current_url == uploadproxy_url: return
32
57
# -*- coding: utf-8 -*- import logging from ocp_resources.constants import PROTOCOL_ERROR_EXCEPTION_DICT from ocp_resources.resource import TIMEOUT, Resource from ocp_resources.utils import TimeoutSampler LOGGER = logging.getLogger(__name__) class CDIConfig(Resource): """ CDIConfig object. """ api_group = Resource.ApiGroup.CDI_KUBEVIRT_IO @property def scratch_space_storage_class_from_spec(self): return self.instance.spec.scratchSpaceStorageClass @property def scratch_space_storage_class_from_status(self): return self.instance.status.scratchSpaceStorageClass @property def upload_proxy_url(self): return self.instance.status.uploadProxyURL def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT): """ Wait until upload proxy url is changed Args: timeout (int): Time to wait for CDI Config. Returns: bool: True if url is equal to uploadProxyURL. """ LOGGER.info( f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL" ) samples = TimeoutSampler( wait_timeout=timeout, sleep=1, exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT, func=self.api.get, field_selector=f"metadata.name=={self.name}", ) for sample in samples: if sample.items: status = sample.items[0].status current_url = status.uploadProxyURL if current_url == uploadproxy_url: return
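A short, hedged usage sketch of the waiter: the import path and the config name follow the usual openshift-python-wrapper layout ("config" is assumed to be the singleton CDIConfig name), the target URL is a placeholder, and the calls require a reachable cluster with CDI installed.

from ocp_resources.cdi_config import CDIConfig  # assumed module path

cdi_config = CDIConfig(name="config")
print(f"current upload proxy URL: {cdi_config.upload_proxy_url}")

# Blocks (up to the default TIMEOUT) until status.uploadProxyURL reports the new route.
cdi_config.wait_until_upload_url_changed(
    uploadproxy_url="https://cdi-uploadproxy.apps.example.com"
)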
as_padded_tensor
This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding. Note that this method should be abstract, but it is implemented to allow backward compatibility.
from typing import Dict, List, TypeVar, Generic import warnings import torch import numpy from allennlp.common import Registrable from allennlp.data.tokenizers.token import Token from allennlp.data.vocabulary import Vocabulary TokenType = TypeVar("TokenType", int, List[int], numpy.ndarray) class TokenIndexer(Generic[TokenType], Registrable): """ A ``TokenIndexer`` determines how string tokens get represented as arrays of indices in a model. This class both converts strings into numerical values, with the help of a :class:`~allennlp.data.vocabulary.Vocabulary`, and it produces actual arrays. Tokens can be represented as single IDs (e.g., the word "cat" gets represented by the number 34), or as lists of character IDs (e.g., "cat" gets represented by the numbers [23, 10, 18]), or in some other way that you can come up with (e.g., if you have some structured input you want to represent in a special way in your data arrays, you can do that here). # Parameters token_min_padding_length : ``int``, optional (default=``0``) The minimum padding length required for the :class:`TokenIndexer`. For example, the minimum padding length of :class:`SingleIdTokenIndexer` is the largest size of filter when using :class:`CnnEncoder`. Note that if you set this for one TokenIndexer, you likely have to set it for all :class:`TokenIndexer` for the same field, otherwise you'll get mismatched tensor sizes. """ default_implementation = "single_id" has_warned_for_as_padded_tensor = False def __init__(self, token_min_padding_length: int = 0) -> None: self._token_min_padding_length: int = token_min_padding_length def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]): """ The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training data (possibly doing some frequency filtering and using an OOV, or out of vocabulary, token). This method takes a token and a dictionary of counts and increments counts for whatever vocabulary items are present in the token. If this is a single token ID representation, the vocabulary item is likely the token itself. If this is a token characters representation, the vocabulary items are all of the characters in the token. """ raise NotImplementedError def tokens_to_indices( self, tokens: List[Token], vocabulary: Vocabulary, index_name: str ) -> Dict[str, List[TokenType]]: """ Takes a list of tokens and converts them to one or more sets of indices. This could be just an ID for each token from the vocabulary. Or it could split each token into characters and return one ID per character. Or (for instance, in the case of byte-pair encoding) there might not be a clean mapping from individual tokens to indices. """ raise NotImplementedError def get_padding_token(self) -> TokenType: """ Deprecated. Please just implement the padding token in `as_padded_tensor` instead. TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve backward compatability, otherwise it would be abstract. When we need to add padding tokens, what should they look like? This method returns a "blank" token of whatever type is returned by :func:`tokens_to_indices`. """ warnings.warn( "Using a Field with get_padding_token as an inherited method," " which will be depreciated in 1.0.0." 
"Please implement as_padded_tensor instead.", FutureWarning, ) return 0 # type: ignore def get_padding_lengths(self, token: TokenType) -> Dict[str, int]: """ This method returns a padding dictionary for the given token that specifies lengths for all arrays that need padding. For example, for single ID tokens the returned dictionary will be empty, but for a token characters representation, this will return the number of characters in the token. """ raise NotImplementedError def get_token_min_padding_length(self) -> int: """ This method returns the minimum padding length required for this TokenIndexer. For example, the minimum padding length of `SingleIdTokenIndexer` is the largest size of filter when using `CnnEncoder`. """ return self._token_min_padding_length # MASKED: as_padded_tensor function (lines 98-124) def pad_token_sequence( self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int], ) -> Dict[str, TokenType]: """ Deprecated. Please use `as_padded_tensor` instead. TODO(Mark): remove in 1.0 release. """ raise NotImplementedError def get_keys(self, index_name: str) -> List[str]: """ Return a list of the keys this indexer return from ``tokens_to_indices``. """ return [index_name] def __eq__(self, other) -> bool: if isinstance(self, other.__class__): return self.__dict__ == other.__dict__ return NotImplemented
def as_padded_tensor( self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int], ) -> Dict[str, torch.Tensor]: """ This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding. Note that this method should be abstract, but it is implemented to allow backward compatability. """ if not self.has_warned_for_as_padded_tensor: warnings.warn( "Using a Field with pad_token_sequence, which will be depreciated in 1.0.0." "Please implement as_padded_tensor instead.", FutureWarning, ) self.has_warned_for_as_padded_tensor = True padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths) return {key: torch.LongTensor(array) for key, array in padded.items()}
98
124
from typing import Dict, List, TypeVar, Generic import warnings import torch import numpy from allennlp.common import Registrable from allennlp.data.tokenizers.token import Token from allennlp.data.vocabulary import Vocabulary TokenType = TypeVar("TokenType", int, List[int], numpy.ndarray) class TokenIndexer(Generic[TokenType], Registrable): """ A ``TokenIndexer`` determines how string tokens get represented as arrays of indices in a model. This class both converts strings into numerical values, with the help of a :class:`~allennlp.data.vocabulary.Vocabulary`, and it produces actual arrays. Tokens can be represented as single IDs (e.g., the word "cat" gets represented by the number 34), or as lists of character IDs (e.g., "cat" gets represented by the numbers [23, 10, 18]), or in some other way that you can come up with (e.g., if you have some structured input you want to represent in a special way in your data arrays, you can do that here). # Parameters token_min_padding_length : ``int``, optional (default=``0``) The minimum padding length required for the :class:`TokenIndexer`. For example, the minimum padding length of :class:`SingleIdTokenIndexer` is the largest size of filter when using :class:`CnnEncoder`. Note that if you set this for one TokenIndexer, you likely have to set it for all :class:`TokenIndexer` for the same field, otherwise you'll get mismatched tensor sizes. """ default_implementation = "single_id" has_warned_for_as_padded_tensor = False def __init__(self, token_min_padding_length: int = 0) -> None: self._token_min_padding_length: int = token_min_padding_length def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]): """ The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training data (possibly doing some frequency filtering and using an OOV, or out of vocabulary, token). This method takes a token and a dictionary of counts and increments counts for whatever vocabulary items are present in the token. If this is a single token ID representation, the vocabulary item is likely the token itself. If this is a token characters representation, the vocabulary items are all of the characters in the token. """ raise NotImplementedError def tokens_to_indices( self, tokens: List[Token], vocabulary: Vocabulary, index_name: str ) -> Dict[str, List[TokenType]]: """ Takes a list of tokens and converts them to one or more sets of indices. This could be just an ID for each token from the vocabulary. Or it could split each token into characters and return one ID per character. Or (for instance, in the case of byte-pair encoding) there might not be a clean mapping from individual tokens to indices. """ raise NotImplementedError def get_padding_token(self) -> TokenType: """ Deprecated. Please just implement the padding token in `as_padded_tensor` instead. TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve backward compatability, otherwise it would be abstract. When we need to add padding tokens, what should they look like? This method returns a "blank" token of whatever type is returned by :func:`tokens_to_indices`. """ warnings.warn( "Using a Field with get_padding_token as an inherited method," " which will be depreciated in 1.0.0." 
"Please implement as_padded_tensor instead.", FutureWarning, ) return 0 # type: ignore def get_padding_lengths(self, token: TokenType) -> Dict[str, int]: """ This method returns a padding dictionary for the given token that specifies lengths for all arrays that need padding. For example, for single ID tokens the returned dictionary will be empty, but for a token characters representation, this will return the number of characters in the token. """ raise NotImplementedError def get_token_min_padding_length(self) -> int: """ This method returns the minimum padding length required for this TokenIndexer. For example, the minimum padding length of `SingleIdTokenIndexer` is the largest size of filter when using `CnnEncoder`. """ return self._token_min_padding_length def as_padded_tensor( self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int], ) -> Dict[str, torch.Tensor]: """ This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens`` then it will be truncated. ``padding_lengths`` is used to provide supplemental padding parameters which are needed in some cases. For example, it contains the widths to pad characters to when doing character-level padding. Note that this method should be abstract, but it is implemented to allow backward compatability. """ if not self.has_warned_for_as_padded_tensor: warnings.warn( "Using a Field with pad_token_sequence, which will be depreciated in 1.0.0." "Please implement as_padded_tensor instead.", FutureWarning, ) self.has_warned_for_as_padded_tensor = True padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths) return {key: torch.LongTensor(array) for key, array in padded.items()} def pad_token_sequence( self, tokens: Dict[str, List[TokenType]], desired_num_tokens: Dict[str, int], padding_lengths: Dict[str, int], ) -> Dict[str, TokenType]: """ Deprecated. Please use `as_padded_tensor` instead. TODO(Mark): remove in 1.0 release. """ raise NotImplementedError def get_keys(self, index_name: str) -> List[str]: """ Return a list of the keys this indexer return from ``tokens_to_indices``. """ return [index_name] def __eq__(self, other) -> bool: if isinstance(self, other.__class__): return self.__dict__ == other.__dict__ return NotImplemented
forward
Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs
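The concrete implementation is masked further below; as a rough sketch only, the generic pattern this docstring describes looks like the following, where the **kwargs pass-through stands in for the BERT-related inputs this model variant also consumes (their exact names are not shown here, so treat the signature as an assumption):

def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
    # Encode the source sentence; TransformerEncoder.forward returns a dict with
    # 'encoder_out' (T x B x C) and 'encoder_padding_mask' (B x T).
    encoder_out = self.encoder(src_tokens, src_lengths=src_lengths)
    # Teacher forcing: condition the decoder on the gold prefix plus the encoder states.
    decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, **kwargs)
    return decoder_out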
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) # MASKED: forward function (lines 316-352) @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
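    A rough usage sketch (``args`` is assumed to be a namespace already populated by
    :func:`base_architecture`; ``dictionary``, ``embed_tokens``, ``src_tokens`` and
    ``src_lengths`` are placeholder names, not defined here):

    Example::

        >>> encoder = TransformerEncoder(args, dictionary, embed_tokens)
        >>> out = encoder(src_tokens, src_lengths)
        >>> out['encoder_out']            # T x B x C
        >>> out['encoder_padding_mask']   # B x T, or None when nothing is padded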
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
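        Both the regular encoder states and the BERT states are reordered along their
        batch dimension (dim 1 for ``T x B x C`` tensors, dim 0 for ``B x T`` padding
        masks); beam search relies on this whenever hypotheses are reshuffled.
        A minimal sketch (``new_order`` here is a hypothetical batch permutation):

        Example::

            >>> new_order = torch.LongTensor([1, 0])
            >>> encoder_out, bert_outs = encoder.reorder_encoder_out(
            ...     encoder_out, bert_outs, new_order)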
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
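    Unlike the plain :class:`TransformerEncoder`, ``forward`` additionally takes a
    ``bert_encoder_out`` dict (keys ``'bert_encoder_out'`` and
    ``'bert_encoder_padding_mask'``) whose states every layer cross-attends to.
    A rough sketch of how it is driven (``bert_out`` / ``bert_pad`` are placeholder
    tensors produced by the BERT model, not attributes of this module):

    Example::

        >>> bert_encoder_out = {
        ...     'bert_encoder_out': bert_out,           # T_bert x B x bert_out_dim
        ...     'bert_encoder_padding_mask': bert_pad,  # B x T_bert
        ... }
        >>> out = encoder(src_tokens, src_lengths, bert_encoder_out)
        >>> out['encoder_out']  # T x B x C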
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
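    A rough usage sketch (``encoder_out`` / ``bert_encoder_out`` are the dicts
    produced by the corresponding encoders; ``args``, ``tgt_dict``, ``embed_tokens``
    and ``prev_output_tokens`` are placeholder names):

    Example::

        >>> decoder = TransformerDecoder(args, tgt_dict, embed_tokens)
        >>> logits, extra = decoder(prev_output_tokens, encoder_out,
        ...                         bert_encoder_out)
        >>> logits          # B x tgt_len x vocab
        >>> extra['attn']   # cross-attention weights, may be None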
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
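    Besides self-attention, this layer runs a second multi-head attention over the
    BERT encoder states and combines the two branches as
    ``x = residual + ratios[0] * self_attn_out + ratios[1] * bert_attn_out``,
    where ``ratios`` come from :meth:`get_ratio`: either the fixed
    ``args.encoder_ratio`` / ``args.bert_ratio`` values, or a stochastic schedule
    during training when ``encoder_bert_dropout`` / ``encoder_bert_mixup`` are
    enabled.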
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
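            bert_encoder_out (Tensor): BERT encoder states used as key/value for the
                extra cross-attention, of shape `(bert_seq_len, batch, bert_out_dim)`
            bert_encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, bert_seq_len)` marking padding positions of the BERT input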
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
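            bert_encoder_out (Tensor): BERT encoder states attended to by the extra
                cross-attention, of shape `(bert_seq_len, batch, bert_out_dim)`
            bert_encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, bert_seq_len)` marking BERT padding positions
            incremental_state (dict, optional): cache for self-/cross-attention keys
                and values during incremental decoding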
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
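        Note: in this "standard" variant the BERT cross-attention is commented out,
        so ``bert_encoder_out`` / ``bert_encoder_padding_mask`` are accepted for
        interface compatibility but ignored; only the usual encoder-decoder
        attention contributes to the residual (``x = residual + x1``).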
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
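        Note: this stacked variant applies encoder-decoder attention and BERT
        attention as two separate sub-layers, each with its own layer norm and
        residual connection, rather than mixing them with ratios;
        ``args.bert_first`` controls which of the two runs first.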
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
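    # weight: Xavier-uniform init; bias (when present): zeros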
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
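    # defaults below mirror base_architecture(); the 'transformers2' model differs
    # in its BERT-fused encoder/decoder layers, not in these hyperparameters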
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
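# --- Illustrative sketch (not part of the original file) -------------------
# The @register_model_architecture presets above all follow the same idiom:
# `getattr(args, name, default)` keeps any value the user already set on the
# command line and only fills in the preset default otherwise. The tiny
# standalone function below (hypothetical preset values, plain argparse,
# no fairseq dependency) demonstrates that resolution order.
import argparse

def apply_hypothetical_preset(args):
    # values already present on `args` win; missing ones fall back to the preset
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.dropout = getattr(args, 'dropout', 0.1)
    return args

_demo_args = apply_hypothetical_preset(argparse.Namespace(dropout=0.3))
assert _demo_args.dropout == 0.3            # user override preserved
assert _demo_args.encoder_embed_dim == 512  # preset default filled in
# ---------------------------------------------------------------------------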
def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out
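# --- Illustrative sketch (not part of the original file) -------------------
# Shape bookkeeping for the `bert_encoder_out` dict built in the forward pass
# above: the padding mask stays `(batch, src_len)`, while the selected BERT
# layer output is permuted from `(batch, src_len, bert_dim)` to
# `(src_len, batch, bert_dim)` before it is handed to the encoder and decoder.
# Toy tensors and a hypothetical pad id stand in for the BERT tokenizer/model.
import torch

_pad_idx = 0
_bert_input = torch.tensor([[5, 7, 9, _pad_idx]])   # (batch=1, src_len=4)
_bert_dim = 16
_bert_features = torch.randn(1, 4, _bert_dim)       # stand-in for a BERT layer

_bert_encoder_out = {
    'bert_encoder_out': _bert_features.permute(1, 0, 2).contiguous(),  # T x B x C
    'bert_encoder_padding_mask': _bert_input.eq(_pad_idx),             # B x T
}
assert _bert_encoder_out['bert_encoder_out'].shape == (4, 1, _bert_dim)
assert _bert_encoder_out['bert_encoder_padding_mask'].shape == (1, 4)
# ---------------------------------------------------------------------------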
316
352
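# --- Illustrative sketch (not part of the original file) -------------------
# What the `mask_cls_sep` branch of the forward pass above effectively does:
# positions holding [CLS]/[SEP] are folded into the BERT padding mask so the
# fused attention ignores them along with the pads. The special ids below are
# hypothetical; the real ones come from the BERT tokenizer. The in-place `+=`
# in the original accumulates the same positions; `|` states the intent as a
# logical OR.
import torch

_pad_id, _cls_id, _sep_id = 0, 101, 102
_bert_input = torch.tensor([[_cls_id, 11, 12, _sep_id, _pad_id]])

_mask = _bert_input.eq(_pad_id)
_mask = _mask | _bert_input.eq(_cls_id)
_mask = _mask | _bert_input.eq(_sep_id)
assert _mask.tolist() == [[True, False, False, True, True]]
# ---------------------------------------------------------------------------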
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
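# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): every architecture
# registration above follows the same idiom -- set only the options a variant
# overrides via `args.X = getattr(args, 'X', default)`, then delegate to the
# base architecture so every remaining option receives its default. A
# hypothetical extra variant registered against the 'transformers2' model
# could look like the following; the name 'transformer_s2_tiny' and the sizes
# are illustrative assumptions, and the code relies on the
# register_model_architecture import and the base_architecture_s2 function
# defined in this file.
# ---------------------------------------------------------------------------
@register_model_architecture('transformers2', 'transformer_s2_tiny')
def transformer_s2_tiny(args):
    # override only what differs from the base configuration
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
    args.encoder_layers = getattr(args, 'encoder_layers', 3)
    args.decoder_layers = getattr(args, 'decoder_layers', 3)
    # everything not set above falls back to the standard defaults
    base_architecture_s2(args)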
forward
Args:
    src_tokens (LongTensor): tokens in the source language of shape
        `(batch, src_len)`
    src_lengths (torch.LongTensor): lengths of each source sentence of
        shape `(batch)`

Returns:
    dict:
        - **encoder_out** (Tensor): the last encoder layer's output of
          shape `(src_len, batch, embed_dim)`
        - **encoder_padding_mask** (ByteTensor): the positions of
          padding elements of shape `(batch, src_len)`
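The docstring above fixes the contract of the masked encoder forward: scale and embed the source tokens, add positional embeddings, apply dropout, transpose to `T x B x C`, run the stack of encoder layers with a padding mask, and return the dict described under Returns. The sketch below is one plausible implementation consistent with that contract and with standard fairseq encoders; it is an editor's illustration, not the verbatim masked body, and it assumes the attributes created in `TransformerEncoder.__init__` (`embed_scale`, `embed_tokens`, `embed_positions`, `layers`, `layer_norm`, `padding_idx`) as well as the module-level `torch.nn.functional as F` import.

def forward(self, src_tokens, src_lengths):
    # sketch only: embed tokens and (optionally) positions, then apply dropout
    x = self.embed_scale * self.embed_tokens(src_tokens)
    if self.embed_positions is not None:
        x += self.embed_positions(src_tokens)
    x = F.dropout(x, p=self.dropout, training=self.training)

    # B x T x C -> T x B x C
    x = x.transpose(0, 1)

    # padding mask is True at pad positions; drop it if the batch has none
    encoder_padding_mask = src_tokens.eq(self.padding_idx)
    if not encoder_padding_mask.any():
        encoder_padding_mask = None

    # run the encoder layers
    for layer in self.layers:
        x = layer(x, encoder_padding_mask)

    # optional final layer norm (encoder_normalize_before)
    if self.layer_norm:
        x = self.layer_norm(x)

    return {
        'encoder_out': x,  # T x B x C
        'encoder_padding_mask': encoder_padding_mask,  # B x T
    }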
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None # MASKED: forward function (lines 534-573) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
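# A minimal standalone sketch of the default-filling pattern used by the
# *_architecture presets above: every `getattr(args, name, default)` call
# keeps a user-supplied value and only falls back to the preset default,
# which is why narrower presets can call base_architecture()/
# base_architecture_s2() last without clobbering overrides.
# `argparse.Namespace` here merely stands in for the parsed training args.
import argparse

_args = argparse.Namespace(dropout=0.2)                              # user override
_args.dropout = getattr(_args, 'dropout', 0.1)                       # stays 0.2
_args.attention_dropout = getattr(_args, 'attention_dropout', 0.1)   # default applied
print(_args.dropout, _args.attention_dropout)                        # -> 0.2 0.1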
def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T }
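# A minimal sketch of the layout of the dict returned by forward() above:
# encoder states are time-major (T x B x C) after the transpose, while the
# padding mask stays batch-major (B x T). The tensors below are dummies
# used only to show the shapes; a real call would run the trained encoder
# on actual src_tokens / src_lengths instead.
import torch

src_len, batch, embed_dim = 7, 2, 512
encoder_out = {
    'encoder_out': torch.zeros(src_len, batch, embed_dim),                  # T x B x C
    'encoder_padding_mask': torch.zeros(batch, src_len, dtype=torch.bool),  # B x T
}
states_batch_major = encoder_out['encoder_out'].transpose(0, 1)             # back to B x T x C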
534
573
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determine this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``.
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determine this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``.
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
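# Illustrative aside (assumption, not part of the original fairseq/bert-nmt file):
# the register_model_architecture presets above only fill in attributes that are
# missing from `args`, so values already supplied on the command line always win.
# Below is a minimal, standalone sketch of that getattr-default pattern; the name
# `apply_tiny_preset` is hypothetical and used only for this example.
from argparse import Namespace


def apply_tiny_preset(args):
    # mirror the base_architecture-style defaults: only set what is absent
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    return args


_preset_args = apply_tiny_preset(Namespace(encoder_embed_dim=1024))
assert _preset_args.encoder_embed_dim == 1024  # explicit value is kept
assert _preset_args.encoder_layers == 6        # missing value falls back to the preset default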
encodeMLM
Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)`
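The masked `encodeMLM` body is not reproduced here. For orientation only, the sketch below shows one way a masked-LM style encoding pass of the kind this docstring describes could be wired up: a learned mask embedding overwrites a random subset of non-padding source positions, the sequence is encoded, and the encoder states are projected onto the source vocabulary. Everything in it (the `MLMEncoderSketch` name, the plain `nn.TransformerEncoder`, the toy dimensions) is an illustrative assumption; the actual function in the file below additionally consumes `bert_encoder_out` and reuses the attributes of `TransformerS2Encoder`.

import torch
import torch.nn as nn


class MLMEncoderSketch(nn.Module):
    """Hypothetical stand-in for a masked-LM encoding pass; NOT the masked encodeMLM implementation."""

    def __init__(self, vocab_size, embed_dim=64, padding_idx=1, mask_ratio=0.15):
        super().__init__()
        self.padding_idx = padding_idx
        self.mask_ratio = mask_ratio
        self.embed_tokens = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
        self.mask_embedding = nn.Parameter(torch.empty(1, embed_dim).normal_(mean=0.0, std=0.02))
        layer = nn.TransformerEncoderLayer(embed_dim, nhead=4, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=2)
        self.vocab_proj = nn.Linear(embed_dim, vocab_size)

    def forward(self, src_tokens):                       # src_tokens: B x T
        padding_mask = src_tokens.eq(self.padding_idx)   # B x T, True at pad positions
        x = self.embed_tokens(src_tokens)                # B x T x C
        # overwrite a random subset of non-pad positions with the learned mask embedding
        masked = (torch.rand(src_tokens.shape, device=src_tokens.device) < self.mask_ratio) & ~padding_mask
        x = torch.where(masked.unsqueeze(-1), self.mask_embedding.expand_as(x), x)
        x = self.encoder(x, src_key_padding_mask=padding_mask)
        return self.vocab_proj(x)                        # B x T x V vocabulary logits


# usage sketch: vocabulary logits for a toy batch of 2 sentences, 7 tokens each
logits = MLMEncoderSketch(vocab_size=1000)(torch.randint(2, 1000, (2, 7)))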
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } # MASKED: encodeMLM function (lines 788-859) def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
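# A minimal standalone sketch (not part of the original file): the TransformerS2
# encoder and decoder layers above blend two attention branches, standard attention
# over the model's own states (x1) and attention over the BERT encoder output (x2),
# with weights chosen by get_ratio(). The helper below illustrates that blending;
# the function name and keyword arguments are illustrative only. It mirrors the
# drop-net branch used when encoder_bert_dropout is enabled, and the fixed
# encoder_ratio/bert_ratio mix otherwise.
import torch
from numpy.random import uniform


def combine_attn_outputs(x_self, x_bert, encoder_ratio=1.0, bert_ratio=1.0,
                         use_drop_net=False, drop_ratio=0.25, mixup=False,
                         training=True):
    """Blend the standard attention output with the BERT attention output."""
    if use_drop_net:
        frand = float(uniform(0, 1))
        if mixup and training:
            ratios = [frand, 1 - frand]            # random convex mix
        elif frand < drop_ratio and training:
            ratios = [1, 0]                        # keep only the standard branch
        elif frand > 1 - drop_ratio and training:
            ratios = [0, 1]                        # keep only the BERT branch
        else:
            ratios = [0.5, 0.5]                    # equal mix (also the eval-time path)
    else:
        ratios = [encoder_ratio, bert_ratio]
    return ratios[0] * x_self + ratios[1] * x_bert


# quick usage check with two (seq_len, batch, dim) attention outputs;
# inside a layer this corresponds to x = residual + combine_attn_outputs(x1, x2, ...)
x1 = torch.randn(5, 2, 512)
x2 = torch.randn(5, 2, 512)
fused = combine_attn_outputs(x1, x2, use_drop_net=True, training=True)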
def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out):
    """Run the encoder as a masked language model over the source.

    Args:
        src_tokens (LongTensor): tokens in the source language of shape
            `(batch, src_len)`
        src_lengths (torch.LongTensor): lengths of each source sentence of
            shape `(batch)`

    Returns:
        Tensor: vocabulary logits of shape `(src_len, batch, vocab_size)`
            predicted from the partially masked source.
    """
    # embed tokens (positions are added after masking below)
    self.src_tokens = src_tokens
    x = self.embed_scale * self.embed_tokens(src_tokens)
    '''
    ratio = 0.3
    mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ), replace=False)

    if mask is not None:
    '''
    '''
        if x.size(1) < 10:
            mask = [4]
        else:
            mask = [7, 9]
        x[:, mask] = self.mask_embedding
    '''
    # score each source position with the small mask network
    mask_output = self.mask(src_tokens, x)
    p = mask_output  # (batch, src_len) per-token scores
    t_p = torch.argsort(p, dim=1)
    ratio = 0.2
    self.ratio = ratio
    # p_mask is 0 where a position is dropped and 1 where it is kept; the
    # threshold is applied to the argsort indices, so roughly
    # `src_len * ratio` positions per sentence are zeroed out.
    p_mask = torch.where(t_p < t_p.size(1) * ratio, torch.zeros_like(p), torch.ones_like(p))
    self.p_mask = p_mask
    p_mask = p_mask.unsqueeze(-1)
    self.mask_output = p
    x = x * p_mask.detach()

    if self.embed_positions is not None:
        x += self.embed_positions(src_tokens)
    x = F.dropout(x, p=self.dropout, training=self.training)

    # B x T x C -> T x B x C
    x = x.transpose(0, 1)

    # compute padding mask
    encoder_padding_mask = src_tokens.eq(self.padding_idx)
    if not encoder_padding_mask.any():
        encoder_padding_mask = None

    # encoder layers (with BERT-fused attention)
    for layer in self.layers:
        x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'])

    if self.layer_norm:
        x = self.layer_norm(x)

    # project encoder states back to the vocabulary for the masked-LM loss
    encoder_vocab_output = self.output_vocab_linear(x)
    self.encoder_vocab_output2 = F.softmax(encoder_vocab_output, dim=-1)
    self.token = src_tokens
    return encoder_vocab_output
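# A minimal sketch (not from the original file) of what the p_mask construction in
# encodeMLM, and in the commented-out block of forward(), appears intended to do:
# silence the lowest-scoring fraction of source positions before re-encoding them
# for the masked-LM loss. This version ranks positions by score via a double
# argsort; the original compares the raw argsort indices against the threshold, so
# it drops positions in a scrambled order rather than strictly by rank. The names
# keep_mask and mask_ratio are illustrative only.
import torch


def keep_mask(scores, mask_ratio=0.2):
    """Return a (batch, src_len) 0/1 mask that drops the lowest-scoring positions.

    `scores` plays the role of `p` in encodeMLM: one score per source token.
    """
    ranks = torch.argsort(torch.argsort(scores, dim=1), dim=1)  # 0 = lowest score
    threshold = int(scores.size(1) * mask_ratio)
    return (ranks >= threshold).to(scores.dtype)


# usage: x is (batch, src_len, embed_dim) token embeddings, scores is (batch, src_len)
scores = torch.rand(2, 10)
x = torch.randn(2, 10, 512)
x = x * keep_mask(scores).unsqueeze(-1).detach()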
788
859
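# For reference, the decoder classes shown earlier build their causal self-attention
# mask in buffered_future_mask() by filling a square matrix with -inf above the
# diagonal (utils.fill_with_neg_inf + torch.triu) and caching it. A minimal
# standalone equivalent, using plain torch and without the caching/resizing logic:
import torch


def causal_attn_mask(dim):
    # -inf strictly above the diagonal, 0 elsewhere: once added to the attention
    # scores, position i can only attend to positions j <= i.
    return torch.triu(torch.full((dim, dim), float('-inf')), diagonal=1)


# e.g. causal_attn_mask(4) ->
# [[0., -inf, -inf, -inf],
#  [0.,   0., -inf, -inf],
#  [0.,   0.,   0., -inf],
#  [0.,   0.,   0.,   0.]]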
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input
        feeding/teacher forcing) to the decoder to produce the next outputs::

            encoder_out = self.encoder(src_tokens, src_lengths)
            return self.decoder(prev_output_tokens, encoder_out)

        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            bert_input (LongTensor): BERT word-piece ids of the source
                sentences of shape `(batch, bert_len)`

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        # positions holding BERT's pad token must not be attended to
        bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
        bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True,
                                                attention_mask=~bert_encoder_padding_mask)
        # keep only the hidden states of the configured BERT layer
        bert_encoder_out = bert_encoder_out[self.bert_output_layer]
        if self.mask_cls_sep:
            # optionally hide the [CLS] and [SEP] positions as well
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
        # B x T x C -> T x B x C, as expected by the encoder/decoder layers
        bert_encoder_out = bert_encoder_out.permute(1, 0, 2).contiguous()
        bert_encoder_out = {
            'bert_encoder_out': bert_encoder_out,
            'bert_encoder_padding_mask': bert_encoder_padding_mask,
        }
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
        decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out,
                                   bert_encoder_out=bert_encoder_out, **kwargs)
        return decoder_out


@register_model('transformerstack')
class TransformerModelStack(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    ..
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args:
            encoder_out: output from the ``forward()`` method
            bert_outs: BERT encoder outputs produced alongside *encoder_out*
            new_order (LongTensor): desired order

        Returns:
            *encoder_out* and *bert_outs* rearranged according to *new_order*
        """
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))

        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict


class TransformerS2Encoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerS2EncoderLayer`.
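    In contrast to :class:`TransformerEncoder`, every layer additionally
    cross-attends to the BERT encoder output passed in through
    *bert_encoder_out*, and a small two-layer mask sub-network plus an output
    vocabulary projection are kept for the auxiliary masked-LM helpers
    (``encodeMLM``/``mask``).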
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
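        The BERT outputs in *bert_outs* are reordered together with the
        regular encoder outputs.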
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
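    In addition to the usual encoder attention, every layer can also attend to
    the BERT encoder output passed in via ``bert_encoder_out``. Illustrative
    usage (a sketch only, assuming ``args``, ``tgt_dict`` and ``embed_tokens``
    have already been built as in ``TransformerModel.build_model``)::

        decoder = TransformerDecoder(args, tgt_dict, embed_tokens)
        logits, extra = decoder(
            prev_output_tokens,
            encoder_out=encoder_out,
            bert_encoder_out=bert_encoder_out,
        )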
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        residual = x
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
        # two attention streams: standard self-attention over the source, and
        # cross-attention into the BERT encoder output
        x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out,
                               key_padding_mask=bert_encoder_padding_mask)
        x1 = F.dropout(x1, p=self.dropout, training=self.training)
        x2 = F.dropout(x2, p=self.dropout, training=self.training)
        # mix the two streams with (possibly stochastic) weights, see get_ratio()
        ratios = self.get_ratio()
        x = residual + ratios[0] * x1 + ratios[1] * x2
        x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)

        residual = x
        x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
        return x

    def get_ratio(self):
        # returns [weight of the self-attention stream, weight of the BERT
        # attention stream]: with args.encoder_bert_dropout, one of the two
        # streams is randomly dropped (or the two are mixed) during training
        # and both are weighted 0.5 otherwise; without it, the fixed
        # args.encoder_ratio / args.bert_ratio weights are used
        if self.encoder_bert_dropout:
            frand = float(uniform(0, 1))
            if self.encoder_bert_mixup and self.training:
                return [frand, 1 - frand]
            if frand < self.encoder_bert_dropout_ratio and self.training:
                return [1, 0]
            elif frand > 1 - self.encoder_bert_dropout_ratio and self.training:
                return [0, 1]
            else:
                return [0.5, 0.5]
        else:
            return [self.encoder_ratio, self.bert_ratio]

    def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
        assert before ^ after
        if after ^ self.normalize_before:
            return layer_norm(x)
        else:
            return x


class TransformerDecoderLayer(nn.Module):
    """Decoder layer block.

    In the original paper each operation (multi-head attention, encoder
    attention or FFN) is postprocessed with: `dropout -> add residual ->
    layernorm`. In the tensor2tensor code they suggest that learning is more
    robust when preprocessing each layer with layernorm and postprocessing
    with: `dropout -> add residual`. We default to the approach in the paper,
    but the tensor2tensor approach can be enabled by setting
    *args.decoder_normalize_before* to ``True``.

    Args:
        args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to attend to encoder outputs
            (default: False).
    """

    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.self_attn = MultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=args.decoder_attention_heads,
            dropout=args.attention_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=True
        )
        self.dropout = args.dropout
        self.activation_fn = utils.get_activation_fn(
            activation=getattr(args, 'activation_fn', 'relu')
        )
        self.activation_dropout = getattr(args, 'activation_dropout', 0)
        if self.activation_dropout == 0:
            # for backwards compatibility with models that use args.relu_dropout
            self.activation_dropout = getattr(args, 'relu_dropout', 0)
        self.normalize_before = args.decoder_normalize_before

        # use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
    return m


@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
    args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
    args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.)
    args.activation_fn = getattr(args, 'activation_fn', 'relu')
    args.dropout = getattr(args, 'dropout', 0.1)
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
    args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
    args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
    args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
    args.adaptive_input = getattr(args, 'adaptive_input', False)
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)


@register_model_architecture('transformers2', 'transformers2')
def base_architecture_s2(args):
    args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
    args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.)
    args.activation_fn = getattr(args, 'activation_fn', 'relu')
    args.dropout = getattr(args, 'dropout', 0.1)
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
    args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
    args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
    args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
    args.adaptive_input = getattr(args, 'adaptive_input', False)
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)


@register_model_architecture('transformerstack', 'transformerstack')
def base_stack_architecture(args):
    args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
    args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.)
    args.activation_fn = getattr(args, 'activation_fn', 'relu')
    args.dropout = getattr(args, 'dropout', 0.1)
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
    args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
    args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
    args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
    args.adaptive_input = getattr(args, 'adaptive_input', False)
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)


@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
    base_architecture(args)


@register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en')
def transformer_s2_iwslt_de_en(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
    base_architecture_s2(args)


@register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en')
def transformerstack_iwslt_de_en(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.encoder_layers = getattr(args, 'encoder_layers', 6)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
    base_stack_architecture(args)


@register_model_architecture('transformers2', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
    base_architecture_s2(args)


# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    args.dropout = getattr(args, 'dropout', 0.3)
    base_architecture(args)


@register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big')
def transformer_s2_vaswani_wmt_en_de_big(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    args.dropout = getattr(args, 'dropout', 0.3)
    base_architecture_s2(args)


@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
    args.dropout = getattr(args, 'dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)


@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)


# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
    transformer_vaswani_wmt_en_de_big(args)
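The BERT-fused layers above combine a conventional encoder-decoder attention branch with a BERT attention branch, weighting the two outputs with the ratios returned by `get_ratio()` (a drop-net style gate). Below is a minimal, self-contained sketch of that gating and fusion logic; `DropNetGate` and `fuse_branches` are illustrative names introduced here, not identifiers from this file.

import random

import torch


class DropNetGate:
    """Illustrative re-implementation of the get_ratio() logic above.

    During training, with probability `dropout_ratio` only the encoder
    branch is kept, with the same probability only the BERT branch is
    kept, and otherwise both are averaged; at inference both branches
    are averaged. With `mixup` enabled, a random convex combination is
    drawn instead.
    """

    def __init__(self, dropout_ratio=0.25, use_dropnet=True, mixup=False,
                 encoder_ratio=1.0, bert_ratio=1.0):
        assert 0.0 <= dropout_ratio <= 0.5
        self.dropout_ratio = dropout_ratio
        self.use_dropnet = use_dropnet
        self.mixup = mixup
        self.encoder_ratio = encoder_ratio
        self.bert_ratio = bert_ratio

    def get_ratio(self, training):
        if not self.use_dropnet:
            return [self.encoder_ratio, self.bert_ratio]
        frand = random.uniform(0, 1)
        if self.mixup and training:
            return [frand, 1 - frand]
        if training and frand < self.dropout_ratio:
            return [1, 0]      # keep only the encoder-attention branch
        if training and frand > 1 - self.dropout_ratio:
            return [0, 1]      # keep only the BERT-attention branch
        return [0.5, 0.5]      # average the two branches


def fuse_branches(residual, enc_branch, bert_branch, gate, training):
    """Combine the two attention outputs as in the layer above:
    x = residual + r0 * enc_branch + r1 * bert_branch."""
    r0, r1 = gate.get_ratio(training)
    return residual + r0 * enc_branch + r1 * bert_branch


if __name__ == "__main__":
    gate = DropNetGate(dropout_ratio=0.25, use_dropnet=True)
    residual = torch.zeros(2, 3, 4)
    enc_branch = torch.ones(2, 3, 4)
    bert_branch = 2 * torch.ones(2, 3, 4)
    out = fuse_branches(residual, enc_branch, bert_branch, gate, training=True)
    print(out.shape)  # torch.Size([2, 3, 4])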
extract_features
Similar to *forward* but only return features.

Returns:
    tuple:
        - the decoder's features of shape `(batch, tgt_len, embed_dim)`
        - a dictionary with any model-specific outputs
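A hedged sketch of the typical flow of an `extract_features` pass of this kind, modeled on the analogous `TransformerDecoderStack.extract_features` visible later in this file. `extract_features_sketch` and its explicit `decoder` argument are illustrative names introduced here; the actual masked `TransformerDecoder.extract_features` may differ in detail (for example, in how the BERT encoder outputs are routed to each layer).

import torch.nn.functional as F


def extract_features_sketch(decoder, prev_output_tokens, encoder_out=None,
                            bert_encoder_out=None, incremental_state=None):
    """Sketch of a fairseq-style decoder feature-extraction pass."""
    # embed positions; during incremental decoding only the last step is needed
    positions = decoder.embed_positions(
        prev_output_tokens, incremental_state=incremental_state,
    ) if decoder.embed_positions is not None else None
    if incremental_state is not None:
        prev_output_tokens = prev_output_tokens[:, -1:]
        if positions is not None:
            positions = positions[:, -1:]

    # embed tokens, optionally project to the decoder dimension, add positions
    x = decoder.embed_scale * decoder.embed_tokens(prev_output_tokens)
    if decoder.project_in_dim is not None:
        x = decoder.project_in_dim(x)
    if positions is not None:
        x = x + positions
    x = F.dropout(x, p=decoder.dropout, training=decoder.training)

    # B x T x C -> T x B x C
    x = x.transpose(0, 1)
    attn, inner_states = None, [x]

    # run the stack of decoder layers; a causal self-attention mask is only
    # needed when decoding the full sequence at once (no incremental state)
    for layer in decoder.layers:
        x, attn = layer(
            x,
            encoder_out['encoder_out'] if encoder_out is not None else None,
            encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
            bert_encoder_out['bert_encoder_out'],
            bert_encoder_out['bert_encoder_padding_mask'],
            incremental_state,
            self_attn_mask=decoder.buffered_future_mask(x)
            if incremental_state is None else None,
        )
        inner_states.append(x)

    if decoder.layer_norm:
        x = decoder.layer_norm(x)

    # T x B x C -> B x T x C, then project out if needed
    x = x.transpose(0, 1)
    if decoder.project_out_dim is not None:
        x = decoder.project_out_dim(x)
    return x, {'attn': attn, 'inner_states': inner_states}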
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra # MASKED: extract_features function (lines 1029-1087) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return 
self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
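The get_ratio logic used throughout the BERT-fused layers above follows a drop-net style scheme: during training it randomly keeps only the encoder-attention branch, only the BERT-attention branch, or a random mixup of the two, and at evaluation it averages both branches. Below is a minimal standalone sketch of that gating; the arguments are illustrative stand-ins for the layer attributes encoder_bert_dropout_ratio, encoder_bert_mixup, encoder_ratio and bert_ratio.

# Minimal standalone sketch of the get_ratio() gating (illustrative only;
# the arguments stand in for the corresponding layer attributes).
from numpy.random import uniform

def drop_net_ratio(training, use_dropnet=True, dropout_ratio=0.25,
                   mixup=False, encoder_ratio=1.0, bert_ratio=1.0):
    """Return [weight for encoder attention, weight for BERT attention]."""
    if not use_dropnet:
        return [encoder_ratio, bert_ratio]
    frand = float(uniform(0, 1))
    if mixup and training:
        return [frand, 1 - frand]      # random convex combination
    if frand < dropout_ratio and training:
        return [1, 0]                  # keep only the encoder-attention branch
    if frand > 1 - dropout_ratio and training:
        return [0, 1]                  # keep only the BERT-attention branch
    return [0.5, 0.5]                  # average both (also the eval-time path)

In the layers above, the two returned weights scale the encoder-attention output x1 and the BERT-attention output x2 before the residual connection.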
def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states}
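The extract_features implementation above consumes encoder_out and bert_encoder_out as dictionaries whose tensors are time-major (seq_len x batch x dim), matching what TransformerS2Model.forward builds from the encoder and the BERT model. A minimal sketch of those inputs with dummy shapes follows; the sizes and the decoder instance are assumptions for illustration.

# Dummy inputs in the layout extract_features() expects (sizes are made up).
import torch

B, S, S_bert, T = 2, 7, 9, 5             # batch, src len, BERT len, tgt len
C, C_bert = 512, 768                     # decoder embed dim, BERT hidden size

encoder_out = {
    'encoder_out': torch.zeros(S, B, C),               # seq_len x batch x dim
    'encoder_padding_mask': torch.zeros(B, S).bool(),  # True at padding
}
bert_encoder_out = {
    'bert_encoder_out': torch.zeros(S_bert, B, C_bert),
    'bert_encoder_padding_mask': torch.zeros(B, S_bert).bool(),
}
prev_output_tokens = torch.randint(4, 100, (B, T))     # LongTensor (batch, tgt_len)

# With a constructed decoder this would return (batch, tgt_len, dim) features:
# features, extra = decoder.extract_features(
#     prev_output_tokens, encoder_out, bert_encoder_out)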
1029
1087
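buffered_future_mask, used by the decoders in this file, caches a causal attention mask with -inf strictly above the diagonal so that position i cannot attend to later positions. A standalone sketch of the same construction; fairseq's fill_with_neg_inf helper is re-implemented inline for illustration.

# Standalone sketch of the causal mask built by buffered_future_mask().
import torch

def future_mask(dim):
    # 0 on and below the diagonal, -inf strictly above it
    return torch.triu(torch.zeros(dim, dim).fill_(float('-inf')), 1)

print(future_mask(3))
# tensor([[0., -inf, -inf],
#         [0.,   0., -inf],
#         [0.,   0.,   0.]])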
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
        Then, feed the encoder output and previous decoder outputs (i.e., input
        feeding/teacher forcing) to the decoder to produce the next outputs::

            encoder_out = self.encoder(src_tokens, src_lengths)
            return self.decoder(prev_output_tokens, encoder_out)

        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            bert_input (LongTensor): BERT wordpiece ids of the source sentences
                of shape `(batch, bert_len)`, fed to the BERT encoder

        Returns:
            tuple:
                - the decoder's output of shape `(batch, tgt_len, vocab)`
                - a dictionary with any model-specific outputs
        """
        bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
        bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True,
                                                attention_mask=~bert_encoder_padding_mask)
        bert_encoder_out = bert_encoder_out[self.bert_output_layer]
        if self.mask_cls_sep:
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
            bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
        bert_encoder_out = bert_encoder_out.permute(1, 0, 2).contiguous()
        bert_encoder_out = {
            'bert_encoder_out': bert_encoder_out,
            'bert_encoder_padding_mask': bert_encoder_padding_mask,
        }
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
        decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out,
                                   bert_encoder_out=bert_encoder_out, **kwargs)
        return decoder_out


@register_model('transformerstack')
class TransformerModelStack(FairseqEncoderDecoderModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani et al., 2017)
    <https://arxiv.org/abs/1706.03762>`_.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    ..
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
        Args:
            encoder_out: output from the ``forward()`` method
            bert_outs: BERT encoder outputs (``bert_encoder_out`` and
                ``bert_encoder_padding_mask``) to reorder together with
                *encoder_out*
            new_order (LongTensor): desired order

        Returns:
            *encoder_out* and *bert_outs* rearranged according to *new_order*
        """
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = \
                encoder_out['encoder_out'].index_select(1, new_order)
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        if bert_outs['bert_encoder_out'] is not None:
            bert_outs['bert_encoder_out'] = \
                bert_outs['bert_encoder_out'].index_select(1, new_order)
        if bert_outs['bert_encoder_padding_mask'] is not None:
            bert_outs['bert_encoder_padding_mask'] = \
                bert_outs['bert_encoder_padding_mask'].index_select(0, new_order)
        return encoder_out, bert_outs

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        if self.embed_positions is None:
            return self.max_source_positions
        return min(self.max_source_positions, self.embed_positions.max_positions())

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            weights_key = '{}.embed_positions.weights'.format(name)
            if weights_key in state_dict:
                del state_dict[weights_key]
            state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i))

        version_key = '{}.version'.format(name)
        if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict[version_key] = torch.Tensor([1])
        return state_dict


class TransformerS2Encoder(FairseqEncoder):
    """
    Transformer encoder consisting of *args.encoder_layers* layers. Each layer
    is a :class:`TransformerS2EncoderLayer`.
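    Unlike :class:`TransformerEncoder`, every layer additionally attends to the
    BERT encoder output passed in through ``bert_encoder_out``, and the two
    attention branches are mixed according to the ``encoder_ratio`` /
    ``bert_ratio`` arguments (see :class:`TransformerS2EncoderLayer`).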
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
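        Both *encoder_out* and *bert_outs* are reordered along their batch
        dimension.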
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
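    A minimal usage sketch (illustrative only; ``args``, ``tgt_dict`` and
    ``embed_tokens`` are assumed to come from ``build_model``, and
    ``encoder_out`` / ``bert_encoder_out`` are the dictionaries produced by the
    source-side encoder and the BERT encoder, respectively)::

        >>> decoder = TransformerDecoder(args, tgt_dict, embed_tokens)
        >>> logits, extra = decoder(prev_output_tokens,
        ...                         encoder_out=encoder_out,
        ...                         bert_encoder_out=bert_encoder_out)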
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
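    In addition to self-attention, this layer attends to the BERT encoder
    output through a second multi-head attention module; the two attention
    outputs are combined with the weights returned by :meth:`get_ratio`
    (``encoder_ratio``/``bert_ratio``, or a stochastic drop-net choice during
    training when ``encoder_bert_dropout`` is enabled).

    A minimal call sketch (illustrative; ``args`` and the input tensors are
    assumed to be prepared by :class:`TransformerS2Encoder`)::

        >>> layer = TransformerS2EncoderLayer(args, bert_gate=True)
        >>> out = layer(x, encoder_padding_mask,
        ...             bert_encoder_out, bert_encoder_padding_mask)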
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
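            bert_encoder_out (Tensor): BERT encoder output of shape
                `(bert_len, batch, bert_out_dim)`
            bert_encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, bert_len)` marking padding positions in the BERT input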
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
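            encoder_out (Tensor, optional): output of the source-side encoder
                of shape `(src_len, batch, embed_dim)`
            bert_encoder_out (Tensor): BERT encoder output of shape
                `(bert_len, batch, bert_out_dim)`
            bert_encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, bert_len)` marking padding positions in the BERT input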
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
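            bert_encoder_out, bert_encoder_padding_mask: accepted for interface
                compatibility with :class:`TransformerDecoderLayer` but unused
                here, since the BERT attention branch is disabled in this
                standard layer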
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
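            encoder_out (Tensor, optional): output of the source-side encoder
                of shape `(src_len, batch, embed_dim)`
            bert_encoder_out (Tensor): BERT encoder output of shape
                `(bert_len, batch, bert_out_dim)`
            bert_encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, bert_len)` marking padding positions in the BERT
                input; encoder attention and BERT attention are applied as two
                separate sub-layers whose order is controlled by
                ``args.bert_first``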
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
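All of the architecture registrations above follow the same pattern: set a handful of hyperparameters with `getattr` so that explicitly passed command-line values take precedence, then delegate to the base architecture to fill in every remaining default. A minimal sketch of a new variant written in that style follows; the name `transformer_s2_tiny` and the chosen sizes are illustrative assumptions, not part of the original code.

# Illustrative sketch only: 'transformer_s2_tiny' and its sizes are made up here
# to show the registration pattern; it is not an architecture from the original code.
@register_model_architecture('transformers2', 'transformer_s2_tiny')
def transformer_s2_tiny(args):
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 512)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
    args.encoder_layers = getattr(args, 'encoder_layers', 3)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 512)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
    args.decoder_layers = getattr(args, 'decoder_layers', 3)
    # fill in every remaining hyperparameter with the shared defaults
    base_architecture_s2(args)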
extract_features
Similar to *forward* but only return features.

Returns:
    tuple:
        - the decoder's features of shape `(batch, tgt_len, embed_dim)`
        - a dictionary with any model-specific outputs
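The masked implementation itself is not reproduced in this row. As a point of reference, the sketch below shows what a body with this signature typically looks like, modeled on `TransformerDecoder.extract_features` later in this file (embed positions and tokens, run the decoder layers while collecting inner states, then undo the time/batch transpose). The layer call is an assumption: `TransformerDecoderLayerStack` may take a different set of arguments than shown here.

# A minimal sketch, modeled on TransformerDecoder.extract_features in this file.
# The real TransformerDecoderStack.extract_features (masked above) may differ,
# e.g. in how each TransformerDecoderLayerStack consumes the BERT encoder output.
def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None,
                     incremental_state=None, **unused):
    # embed positions (only the last step during incremental decoding)
    positions = self.embed_positions(
        prev_output_tokens,
        incremental_state=incremental_state,
    ) if self.embed_positions is not None else None

    if incremental_state is not None:
        prev_output_tokens = prev_output_tokens[:, -1:]
        if positions is not None:
            positions = positions[:, -1:]

    # embed tokens, project to the decoder dimension if needed, add positions
    x = self.embed_scale * self.embed_tokens(prev_output_tokens)
    if self.project_in_dim is not None:
        x = self.project_in_dim(x)
    if positions is not None:
        x += positions
    x = F.dropout(x, p=self.dropout, training=self.training)

    # B x T x C -> T x B x C
    x = x.transpose(0, 1)
    attn = None
    inner_states = [x]

    # decoder layers (argument layout assumed to mirror TransformerDecoderLayer)
    for layer in self.layers:
        x, attn = layer(
            x,
            encoder_out['encoder_out'] if encoder_out is not None else None,
            encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
            bert_encoder_out['bert_encoder_out'],
            bert_encoder_out['bert_encoder_padding_mask'],
            incremental_state,
            self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
        )
        inner_states.append(x)

    if self.layer_norm:
        x = self.layer_norm(x)

    # T x B x C -> B x T x C
    x = x.transpose(0, 1)
    if self.project_out_dim is not None:
        x = self.project_out_dim(x)

    return x, {'attn': attn, 'inner_states': inner_states}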
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra # MASKED: extract_features function (lines 1232-1290) def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def 
buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
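# --- Editor's illustrative sketch (not part of the original source) ---
# The BERT-fused layers above combine two attention branches, x1 (standard
# encoder/decoder attention) and x2 (attention over the BERT encoder output),
# as `residual + ratios[0] * x1 + ratios[1] * x2`, with the ratios chosen by
# get_ratio(). The helper below is a hedged, self-contained restatement of that
# drop-net gating; the function name and signature are assumptions made for the
# sketch, not the original API.
import torch
from numpy.random import uniform

def dropnet_ratio(training, use_dropnet, dropout_ratio=0.25, mixup=False,
                  encoder_ratio=1.0, bert_ratio=1.0):
    # Mirrors get_ratio(): during training, keep only one branch with probability
    # `dropout_ratio` per side (or mix the branches with a random convex weight
    # when `mixup` is on), otherwise average them; at evaluation with drop-net
    # enabled both branches get weight 0.5, and with drop-net disabled the fixed
    # (encoder_ratio, bert_ratio) gates are used.
    if use_dropnet:
        frand = float(uniform(0, 1))
        if mixup and training:
            return [frand, 1 - frand]
        if frand < dropout_ratio and training:
            return [1, 0]
        if frand > 1 - dropout_ratio and training:
            return [0, 1]
        return [0.5, 0.5]
    return [encoder_ratio, bert_ratio]

# Example: blend two attention outputs of shape (seq_len, batch, embed_dim).
x1, x2, residual = torch.randn(5, 2, 8), torch.randn(5, 2, 8), torch.randn(5, 2, 8)
ratios = dropnet_ratio(training=True, use_dropnet=True)
out = residual + ratios[0] * x1 + ratios[1] * x2
# --- end of sketch ---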
def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states}
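# --- Editor's illustrative sketch (not part of the original source) ---
# extract_features() above passes `self.buffered_future_mask(x)` as the decoder's
# self_attn_mask when decoding a whole sequence, and skips it during incremental
# decoding (only the last target token is fed in). The snippet below is a minimal,
# assumed-name restatement of that causal mask: -inf strictly above the diagonal,
# so position i cannot attend to any future position j > i.
import torch

def future_mask(dim, device=None, dtype=torch.float32):
    # Equivalent in effect to triu(fill_with_neg_inf(tensor.new(dim, dim)), 1)
    # in the original buffered_future_mask().
    mask = torch.full((dim, dim), float('-inf'), device=device, dtype=dtype)
    return torch.triu(mask, 1)

print(future_mask(3))
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])
# --- end of sketch ---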
1232
1290
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from numpy.random import uniform from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, LayerNorm, MultiheadAttention, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from bert import BertTokenizer DEFAULT_MAX_SOURCE_POSITIONS = 1024 DEFAULT_MAX_TARGET_POSITIONS = 1024 from bert import BertModel @register_model('transformer') class TransformerModel(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', 
help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. ' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) @register_model('transformers2') class TransformerS2Model(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. 
Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False, args=None): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep, args) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerS2Encoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoder(args, tgt_dict, embed_tokens) def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs): """ Run the forward pass for an encoder-decoder model. First feed a batch of source tokens through the encoder. 
Then, feed the encoder output and previous decoder outputs (i.e., input feeding/teacher forcing) to the decoder to produce the next outputs:: encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder(prev_output_tokens, encoder_out) Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (LongTensor): source sentence lengths of shape `(batch)` prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad()) bert_encoder_out, _ = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask= ~ bert_encoder_padding_mask) bert_encoder_out = bert_encoder_out[self.bert_output_layer] if self.mask_cls_sep: bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls()) bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep()) bert_encoder_out = bert_encoder_out.permute(1,0,2).contiguous() bert_encoder_out = { 'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask, } encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out) decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs) return decoder_out @register_model('transformerstack') class TransformerModelStack(FairseqEncoderDecoderModel): """ Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017) <https://arxiv.org/abs/1706.03762>`_. Args: encoder (TransformerEncoder): the encoder decoder (TransformerDecoder): the decoder The Transformer model provides the following named architectures and command-line arguments: .. 
argparse:: :ref: fairseq.models.transformer_parser :prog: """ def __init__(self, encoder, decoder, bertencoder, berttokenizer, mask_cls_sep=False): super().__init__(encoder, decoder, bertencoder, berttokenizer, mask_cls_sep) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" # fmt: off parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use') parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability') parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights') parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.') parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding') parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension') parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN') parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers') parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads') parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block') parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder') parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding') parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension') parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN') parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers') parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads') parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder') parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block') parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings') parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings' ' (requires shared dictionary and embed dim)') parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)') parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. 
' 'Must be used with adaptive_loss criterion'), parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') # fmt: on @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure all arguments are present in older models base_architecture(args) if not hasattr(args, 'max_source_positions'): args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS if not hasattr(args, 'max_target_positions'): args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS src_dict, tgt_dict = task.source_dictionary, task.target_dictionary if len(task.datasets) > 0: src_berttokenizer = next(iter(task.datasets.values())).berttokenizer else: src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name) def build_embedding(dictionary, embed_dim, path=None): num_embeddings = len(dictionary) padding_idx = dictionary.pad() emb = Embedding(num_embeddings, embed_dim, padding_idx) # if provided, load from preloaded dictionaries if path: embed_dict = utils.parse_embedding(path) utils.load_embedding(embed_dict, dictionary, emb) return emb if args.share_all_embeddings: if src_dict != tgt_dict: raise ValueError('--share-all-embeddings requires a joined dictionary') if args.encoder_embed_dim != args.decoder_embed_dim: raise ValueError( '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim') if args.decoder_embed_path and ( args.decoder_embed_path != args.encoder_embed_path): raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path') encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = encoder_embed_tokens args.share_decoder_input_output_embed = True else: encoder_embed_tokens = build_embedding( src_dict, args.encoder_embed_dim, args.encoder_embed_path ) decoder_embed_tokens = build_embedding( tgt_dict, args.decoder_embed_dim, args.decoder_embed_path ) bertencoder = BertModel.from_pretrained(args.bert_model_name) args.bert_out_dim = bertencoder.hidden_size encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens) decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens) return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep) @classmethod def build_encoder(cls, args, src_dict, embed_tokens): return TransformerEncoder(args, src_dict, embed_tokens) @classmethod def build_decoder(cls, args, tgt_dict, embed_tokens): return TransformerDecoderStack(args, tgt_dict, embed_tokens) class TransformerEncoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerEncoderLayer(args) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, src_tokens, src_lengths): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.layer_norm(x) return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerS2Encoder(FairseqEncoder): """ Transformer encoder consisting of *args.encoder_layers* layers. Each layer is a :class:`TransformerEncoderLayer`. 
Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): encoding dictionary embed_tokens (torch.nn.Embedding): input embedding """ def __init__(self, args, dictionary, embed_tokens): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.output_mask = nn.Softmax(dim = 0) self.t_layer = nn.Linear(512, 1) self.output_vocab_linear = nn.Linear(512, embed_tokens.num_embeddings) embed_dim = embed_tokens.embedding_dim self.padding_idx = embed_tokens.padding_idx self.max_source_positions = args.max_source_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) self.embed_positions = PositionalEmbedding( args.max_source_positions, embed_dim, self.padding_idx, learned=args.encoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.encoder_layers self.layers = nn.ModuleList([]) self.layers.extend([ TransformerS2EncoderLayer(args, bert_gate=bert_gates[i]) for i in range(args.encoder_layers) ]) if args.encoder_normalize_before: self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None self.mask_embedding = nn.init.normal_(nn.Parameter(torch.zeros((1, embed_dim)))) self.mask_layers = nn.ModuleList([]) self.mask_layers.extend([ TransformerEncoderLayer(args) for i in range(2) ]) if args.encoder_normalize_before: self.mask_layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None ''' self.x = None self.unmask_output = None self.mask_output = None self.encoder_vocab_output = None self.backwards = 0 ''' self.i = 0 def forward(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions x = self.embed_scale * self.embed_tokens(src_tokens) if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C # T x B mask model ########### ########### ########### ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p.transpose(0, 1) t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1).transpose(0,1) self.mask_output = p if self.training: x = x * p_mask.detach() else: x = x ########### ########### ########### # t_p[t_p>t_p.size*ratio] = 1 # t_p[t_p<=t_p.size*ratio] = 0 # t_p.permute(1,0) # model.encoder.mask_output ''' x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) # if self.training: ''' self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) ''' ''' ########################## if self.i%1==0: import scipy.io as scio 
self.encoder_vocab_output = self.encodeMLM(src_tokens, src_lengths, bert_encoder_out) scio.savemat("/home/iojhui/bert-nmt/data"+str(self.i)+".mat", {'mask_output':self.mask_output.detach().cpu().numpy(),"src_tokens":src_tokens.cpu().numpy()}) self.i+=1 ######################## ''' return { 'encoder_out': x, # T x B x C 'encoder_padding_mask': encoder_padding_mask, # B x T } def encodeMLM(self, src_tokens, src_lengths, bert_encoder_out): """ Args: src_tokens (LongTensor): tokens in the source language of shape `(batch, src_len)` src_lengths (torch.LongTensor): lengths of each source sentence of shape `(batch)` Returns: dict: - **encoder_out** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)` - **encoder_padding_mask** (ByteTensor): the positions of padding elements of shape `(batch, src_len)` """ # embed tokens and positions self.src_tokens = src_tokens x = self.embed_scale * self.embed_tokens(src_tokens) ''' ratio = 0.3 mask = np.random.choice(src_tokens.size()[1], (int(src_tokens.size()[1] * ratio), ),replace = False) if mask is not None: ''' ''' if x.size(1)<10: mask = [4] else: mask = [7,9] x[:, mask] = self.mask_embedding ''' mask_output = self.mask(src_tokens , x) p = mask_output p = p t_p = torch.argsort(p,dim=1) ratio = 0.2 self.ratio = ratio p_mask = torch.where(t_p<t_p.size(1)*ratio,torch.zeros_like(p),torch.ones_like(p)) self.p_mask = p_mask p_mask = p_mask.unsqueeze(-1) self.mask_output = p x = x * p_mask.detach() if self.embed_positions is not None: x += self.embed_positions(src_tokens) x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.layers: x = layer(x, encoder_padding_mask, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask']) if self.layer_norm: x = self.layer_norm(x) encoder_vocab_output = self.output_vocab_linear(x) self.encoder_vocab_output2 = torch.nn.functional.softmax(encoder_vocab_output,dim=-1) self.token = src_tokens return encoder_vocab_output def mask(self, src_tokens, x): x = x.transpose(0, 1) # compute padding mask encoder_padding_mask = src_tokens.eq(self.padding_idx) if not encoder_padding_mask.any(): encoder_padding_mask = None # encoder layers for layer in self.mask_layers: x = layer(x, encoder_padding_mask) if self.layer_norm: x = self.mask_layer_norm(x) x = self.t_layer(x).squeeze(-1) if encoder_padding_mask is not None: x = x.masked_fill(encoder_padding_mask.transpose(0,1),value=torch.tensor(float('-inf'))) return self.output_mask(x).transpose(0, 1) def reorder_encoder_out(self, encoder_out, bert_outs, new_order): """ Reorder encoder output according to *new_order*. 
Args: encoder_out: output from the ``forward()`` method new_order (LongTensor): desired order Returns: *encoder_out* rearranged according to *new_order* """ if encoder_out['encoder_out'] is not None: encoder_out['encoder_out'] = \ encoder_out['encoder_out'].index_select(1, new_order) if encoder_out['encoder_padding_mask'] is not None: encoder_out['encoder_padding_mask'] = \ encoder_out['encoder_padding_mask'].index_select(0, new_order) if bert_outs['bert_encoder_out'] is not None: bert_outs['bert_encoder_out'] = \ bert_outs['bert_encoder_out'].index_select(1, new_order) if bert_outs['bert_encoder_padding_mask'] is not None: bert_outs['bert_encoder_padding_mask'] = \ bert_outs['bert_encoder_padding_mask'].index_select(0, new_order) return encoder_out, bert_outs def max_positions(self): """Maximum input length supported by the encoder.""" if self.embed_positions is None: return self.max_source_positions return min(self.max_source_positions, self.embed_positions.max_positions()) def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms self.layers[i].upgrade_state_dict_named(state_dict, "{}.layers.{}".format(name, i)) version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). 
""" def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None bert_gates = getattr(args, 'bert_gates', [1, 1, 1, 1, 1, 1]) bert_gates = [x == 1 for x in bert_gates] assert len(bert_gates) == args.decoder_layers print('bert_gates', bert_gates) self.layers = nn.ModuleList([]) decoder_no_bert = getattr(args, 'decoder_no_bert', False) if decoder_no_bert: self.layers.extend([ TransformerStandardDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) else: self.layers.extend([ TransformerDecoderLayer(args, no_encoder_attn, bert_gate=bert_gates[i]) for i in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerDecoderStack(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. 
Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): super().__init__(dictionary) self.register_buffer('version', torch.Tensor([3])) self.dropout = args.dropout self.share_input_output_embed = args.share_decoder_input_output_embed input_embed_dim = embed_tokens.embedding_dim embed_dim = args.decoder_embed_dim self.output_embed_dim = args.decoder_output_dim padding_idx = embed_tokens.padding_idx self.max_target_positions = args.max_target_positions self.embed_tokens = embed_tokens self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False) if embed_dim != input_embed_dim else None self.embed_positions = PositionalEmbedding( args.max_target_positions, embed_dim, padding_idx, learned=args.decoder_learned_pos, ) if not args.no_token_positional_embeddings else None self.layers = nn.ModuleList([]) self.layers.extend([ TransformerDecoderLayerStack(args, no_encoder_attn) for _ in range(args.decoder_layers) ]) self.adaptive_softmax = None self.project_out_dim = Linear(embed_dim, self.output_embed_dim, bias=False) \ if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights else None if args.adaptive_softmax_cutoff is not None: self.adaptive_softmax = AdaptiveSoftmax( len(dictionary), self.output_embed_dim, options.eval_str_list(args.adaptive_softmax_cutoff, type=int), dropout=args.adaptive_softmax_dropout, adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None, factor=args.adaptive_softmax_factor, tie_proj=args.tie_adaptive_proj, ) elif not self.share_input_output_embed: self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), self.output_embed_dim)) nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) if args.decoder_normalize_before and not getattr(args, 'no_decoder_final_norm', False): self.layer_norm = LayerNorm(embed_dim) else: self.layer_norm = None def forward(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ x, extra = self.extract_features(prev_output_tokens, encoder_out, bert_encoder_out, incremental_state) x = self.output_layer(x) return x, extra def extract_features(self, prev_output_tokens, encoder_out=None, bert_encoder_out=None, incremental_state=None, **unused): """ Similar to *forward* but only return features. 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ # embed positions positions = self.embed_positions( prev_output_tokens, incremental_state=incremental_state, ) if self.embed_positions is not None else None if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions x = F.dropout(x, p=self.dropout, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) attn = None inner_states = [x] # decoder layers for layer in self.layers: x, attn = layer( x, encoder_out['encoder_out'] if encoder_out is not None else None, encoder_out['encoder_padding_mask'] if encoder_out is not None else None, bert_encoder_out['bert_encoder_out'], bert_encoder_out['bert_encoder_padding_mask'], incremental_state, self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None, ) inner_states.append(x) if self.layer_norm: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {'attn': attn, 'inner_states': inner_states} def output_layer(self, features, **kwargs): """Project features to the vocabulary size.""" if self.adaptive_softmax is None: # project back to size of vocabulary if self.share_input_output_embed: return F.linear(features, self.embed_tokens.weight) else: return F.linear(features, self.embed_out) else: return features def max_positions(self): """Maximum output length supported by the decoder.""" if self.embed_positions is None: return self.max_target_positions return min(self.max_target_positions, self.embed_positions.max_positions()) def buffered_future_mask(self, tensor): dim = tensor.size(0) if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device: self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1) if self._future_mask.size(0) < dim: self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1) return self._future_mask[:dim, :dim] def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = '{}.embed_positions.weights'.format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict['{}.embed_positions._float_tensor'.format(name)] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layers.{}.layer_norms.{}.{}'.format(name, i, old, m) if k in state_dict: state_dict['{}.layers.{}.{}.{}'.format(name, i, new, m)] = state_dict[k] del state_dict[k] version_key = '{}.version'.format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict class TransformerEncoderLayer(nn.Module): """Encoder layer block. 
In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x, attn_weight = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) self.attn_weight = attn_weight residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerS2EncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. 
Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, bert_gate=True): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.encoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.encoder_normalize_before self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = { '0': 'self_attn_layer_norm', '1': 'final_layer_norm' } for old, new in layer_norm_map.items(): for m in ('weight', 'bias'): k = '{}.layer_norms.{}.{}'.format(name, old, m) if k in state_dict: state_dict[ '{}.{}.{}'.format(name, new, m) ] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, bert_encoder_out, bert_encoder_padding_mask): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) x1, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask) x2, _ = self.bert_attn(query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) return x def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x class TransformerDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. 
# TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = False self.encoder_bert_mixup = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x2, _ = self.bert_attn( query=x, key=bert_encoder_out, value=bert_encoder_out, key_padding_mask=bert_encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x1 = F.dropout(x1, p=self.dropout, training=self.training) x2 = F.dropout(x2, p=self.dropout, training=self.training) ratios = self.get_ratio() x = residual + ratios[0] * x1 + ratios[1] * x2 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerStandardDecoderLayer(nn.Module): """Decoder layer block. In the original paper each operation (multi-head attention, encoder attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. 
We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.decoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments no_encoder_attn (bool, optional): whether to attend to encoder outputs (default: False). """ def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, bert_gate=True): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout = getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) # bert_out_dim = args.bert_out_dim # self.bert_attn = MultiheadAttention( # self.embed_dim, args.decoder_attention_heads, # kdim=bert_out_dim, vdim=bert_out_dim, # dropout=args.attention_dropout, encoder_decoder_attention=True # ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False self.encoder_ratio = args.encoder_ratio self.bert_ratio = args.bert_ratio if not bert_gate: self.bert_ratio = 0. self.encoder_bert_dropout = getattr(args, 'encoder_bert_dropout', False) self.encoder_bert_dropout_ratio = getattr(args, 'encoder_bert_dropout_ratio', 0.25) assert self.encoder_bert_dropout_ratio >= 0. and self.encoder_bert_dropout_ratio <= 0.5 self.encoder_bert_mixup = getattr(args, 'encoder_bert_mixup', False) def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: residual = x x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True) if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) x1, attn = self.encoder_attn( query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) # x2, _ = self.bert_attn( # query=x, # key=bert_encoder_out, # value=bert_encoder_out, # key_padding_mask=bert_encoder_padding_mask, # incremental_state=incremental_state, # static_kv=True, # need_weights=(not self.training and self.need_attn), # ) x1 = F.dropout(x1, p=self.dropout, training=self.training) # x2 = F.dropout(x2, p=self.dropout, training=self.training) # ratios = self.get_ratio() x = residual + x1 x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def get_ratio(self): if self.encoder_bert_dropout: frand = float(uniform(0, 1)) if self.encoder_bert_mixup and self.training: return [frand, 1 - frand] if frand < self.encoder_bert_dropout_ratio and self.training: return [1, 0] elif frand > 1 - self.encoder_bert_dropout_ratio and self.training: return [0, 1] else: return [0.5, 0.5] else: return [self.encoder_ratio, self.bert_ratio] def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn class TransformerDecoderLayerStack(nn.Module): def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False): super().__init__() self.embed_dim = args.decoder_embed_dim self.self_attn = MultiheadAttention( embed_dim=self.embed_dim, num_heads=args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, ) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, 'activation_fn', 'relu') ) self.activation_dropout 
= getattr(args, 'activation_dropout', 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, 'relu_dropout', 0) self.normalize_before = args.decoder_normalize_before # use layerNorm rather than FusedLayerNorm for exporting. # char_inputs can be used to determint this. # TODO remove this once we update apex with the fix export = getattr(args, 'char_inputs', False) self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) if no_encoder_attn: self.encoder_attn = None self.encoder_attn_layer_norm = None else: self.encoder_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) bert_out_dim = args.bert_out_dim self.bert_attn = MultiheadAttention( self.embed_dim, args.decoder_attention_heads, kdim=bert_out_dim, vdim=bert_out_dim, dropout=args.attention_dropout, encoder_decoder_attention=True ) self.bert_attn_layer_norm = LayerNorm(self.embed_dim, export=export) self.bert_first = args.bert_first self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim) self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, export=export) self.need_attn = True self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def forward( self, x, encoder_out=None, encoder_padding_mask=None, bert_encoder_out=None, bert_encoder_padding_mask=None, incremental_state=None, prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None, self_attn_padding_mask=None, ): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
Returns: encoded output of shape `(batch, src_len, embed_dim)` """ residual = x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True) if prev_self_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_self_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.self_attn._set_input_buffer(incremental_state, saved_state) x, attn = self.self_attn( query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True) if self.encoder_attn is not None: if prev_attn_state is not None: if incremental_state is None: incremental_state = {} prev_key, prev_value = prev_attn_state saved_state = {"prev_key": prev_key, "prev_value": prev_value} self.encoder_attn._set_input_buffer(incremental_state, saved_state) def sinattn(attnlayer, x, layer_norm, keyorvalue, key_padding, incremental_state): residual = x x = self.maybe_layer_norm(layer_norm, x, before=True) x, attn = attnlayer( query=x, key=keyorvalue, value=keyorvalue, key_padding_mask=key_padding, incremental_state=incremental_state, static_kv=True, need_weights=(not self.training and self.need_attn), ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(layer_norm, x, after=True) return x, attn if self.bert_first: x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) else: x, attn = sinattn(self.encoder_attn, x, self.encoder_attn_layer_norm, encoder_out, encoder_padding_mask, incremental_state) x, attn = sinattn(self.bert_attn, x, self.bert_attn_layer_norm, bert_encoder_out, bert_encoder_padding_mask, incremental_state) residual = x x = self.maybe_layer_norm(self.final_layer_norm, x, before=True) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=self.activation_dropout, training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x x = self.maybe_layer_norm(self.final_layer_norm, x, after=True) if self.onnx_trace and incremental_state is not None: saved_state = self.self_attn._get_input_buffer(incremental_state) self_attn_state = saved_state["prev_key"], saved_state["prev_value"] return x, attn, self_attn_state return x, attn def maybe_layer_norm(self, layer_norm, x, before=False, after=False): assert before ^ after if after ^ self.normalize_before: return layer_norm(x) else: return x def make_generation_fast_(self, need_attn=False, **kwargs): self.need_attn = need_attn def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.) 
return m @register_model_architecture('transformer', 'transformer') def base_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformers2', 'transformers2') def base_architecture_s2(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformerstack', 'transformerstack') def base_stack_architecture(args): args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 6) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False) args.attention_dropout = getattr(args, 'attention_dropout', 0.) args.activation_dropout = getattr(args, 'activation_dropout', 0.) 
args.activation_fn = getattr(args, 'activation_fn', 'relu') args.dropout = getattr(args, 'dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.adaptive_input = getattr(args, 'adaptive_input', False) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) @register_model_architecture('transformer', 'transformer_iwslt_de_en') def transformer_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_iwslt_de_en') def transformer_s2_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_architecture_s2(args) @register_model_architecture('transformerstack', 'transformerstack_iwslt_de_en') def transformerstack_iwslt_de_en(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4) args.decoder_layers = getattr(args, 'decoder_layers', 6) base_stack_architecture(args) @register_model_architecture('transformers2', 'transformer_wmt_en_de') def transformer_wmt_en_de(args): base_architecture_s2(args) # parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big') def transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 
'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture(args) @register_model_architecture('transformers2', 'transformer_s2_vaswani_wmt_en_de_big') def transformer_s2_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.dropout = getattr(args, 'dropout', 0.3) base_architecture_s2(args) @register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big') def transformer_vaswani_wmt_en_fr_big(args): args.dropout = getattr(args, 'dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) @register_model_architecture('transformer', 'transformer_wmt_en_de_big') def transformer_wmt_en_de_big(args): args.attention_dropout = getattr(args, 'attention_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args) # default parameters used in tensor2tensor implementation @register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t') def transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.activation_dropout = getattr(args, 'activation_dropout', 0.1) transformer_vaswani_wmt_en_de_big(args)
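All of the architecture functions above follow the same getattr-with-default idiom: an attribute is filled in only when the caller has not already set it on the parsed argument namespace, which is why the more specific architectures can set a few overrides and then defer to a base function. A minimal, self-contained sketch of that idiom (argparse.Namespace stands in for fairseq's parsed args; the function and attribute names here are illustrative, not part of fairseq):

from argparse import Namespace

def base_defaults(args):
    # Only fill attributes the caller has not already set.
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.dropout = getattr(args, 'dropout', 0.1)

def big_defaults(args):
    # A "bigger" variant overrides one default, then defers to the base.
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    base_defaults(args)

args = Namespace(dropout=0.3)   # user-supplied value survives
big_defaults(args)
print(args.encoder_embed_dim, args.dropout)  # -> 1024 0.3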
preprocess_train_input
Pre-process the training data. This is needed because - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for DUPLICATE_MASK in training data. Args: features: Dictionary of features for training. labels: Training labels. Returns: Processed training features.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """NCF model input pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools # pylint: disable=g-bad-import-order import tensorflow.compat.v2 as tf # pylint: enable=g-bad-import-order from utils.recommendation import constants as rconst from utils.recommendation import movielens from utils.recommendation import data_pipeline NUM_SHARDS = 16 def create_dataset_from_tf_record_files(input_file_pattern, pre_batch_size, batch_size, is_training=True): """Creates dataset from (tf)records files for training/evaluation.""" files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training) def make_dataset(files_dataset, shard_index): """Returns dataset for sharded tf record files.""" if pre_batch_size != batch_size: raise ValueError("Pre-batch ({}) size is not equal to batch " "size ({})".format(pre_batch_size, batch_size)) files_dataset = files_dataset.shard(NUM_SHARDS, shard_index) dataset = files_dataset.interleave(tf.data.TFRecordDataset) decode_fn = functools.partial( data_pipeline.DatasetManager.deserialize, batch_size=pre_batch_size, is_training=is_training) dataset = dataset.map( decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset dataset = tf.data.Dataset.range(NUM_SHARDS) map_fn = functools.partial(make_dataset, files) dataset = dataset.interleave( map_fn, cycle_length=NUM_SHARDS, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) return dataset def create_dataset_from_data_producer(producer, params): """Return dataset online-generating data.""" # MASKED: preprocess_train_input function (lines 70-88) train_input_fn = producer.make_input_fn(is_training=True) train_input_dataset = train_input_fn(params).map(preprocess_train_input) def preprocess_eval_input(features): """Pre-process the eval data. This is needed because: - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for VALID_PT_MASK in eval data. Args: features: Dictionary of features for evaluation. Returns: Processed evaluation features. """ labels = tf.cast(tf.zeros_like( features[movielens.USER_COLUMN]), tf.bool) fake_valid_pt_mask = tf.cast( tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool) features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask features[rconst.TRAIN_LABEL_KEY] = labels return features eval_input_fn = producer.make_input_fn(is_training=False) eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input) return train_input_dataset, eval_input_dataset def create_ncf_input_data(params, producer=None, input_meta_data=None, strategy=None): """Creates NCF training/evaluation dataset. Args: params: Dictionary containing parameters for train/evaluation data. 
producer: Instance of BaseDataConstructor that generates data online. Must not be None when params['train_dataset_path'] or params['eval_dataset_path'] is not specified. input_meta_data: A dictionary of input metadata to be used when reading data from tf record files. Must be specified when params["train_input_dataset"] is specified. strategy: Distribution strategy used for distributed training. If specified, used to assert that evaluation batch size is correctly a multiple of total number of devices used. Returns: (training dataset, evaluation dataset, train steps per epoch, eval steps per epoch) Raises: ValueError: If data is being generated online for when using TPU's. """ # NCF evaluation metric calculation logic assumes that evaluation data # sample size are in multiples of (1 + number of negative samples in # evaluation) for each device. As so, evaluation batch size must be a # multiple of (number of replicas * (1 + number of negative samples)). num_devices = strategy.num_replicas_in_sync if strategy else 1 if (params["eval_batch_size"] % (num_devices * (1 + rconst.NUM_EVAL_NEGATIVES))): raise ValueError("Evaluation batch size must be divisible by {} " "times {}".format(num_devices, (1 + rconst.NUM_EVAL_NEGATIVES))) if params["train_dataset_path"]: assert params["eval_dataset_path"] train_dataset = create_dataset_from_tf_record_files( params["train_dataset_path"], input_meta_data["train_prebatch_size"], params["batch_size"], is_training=True) eval_dataset = create_dataset_from_tf_record_files( params["eval_dataset_path"], input_meta_data["eval_prebatch_size"], params["eval_batch_size"], is_training=False) num_train_steps = int(input_meta_data["num_train_steps"]) num_eval_steps = int(input_meta_data["num_eval_steps"]) else: if params["use_tpu"]: raise ValueError( "TPU training does not support data producer yet. " "Use pre-processed data.") assert producer # Start retrieving data from producer. train_dataset, eval_dataset = create_dataset_from_data_producer( producer, params) num_train_steps = producer.train_batches_per_epoch num_eval_steps = producer.eval_batches_per_epoch return train_dataset, eval_dataset, num_train_steps, num_eval_steps
def preprocess_train_input(features, labels): """Pre-process the training data. This is needed because - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for DUPLICATE_MASK in training data. Args: features: Dictionary of features for training. labels: Training labels. Returns: Processed training features. """ fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN]) features[rconst.DUPLICATE_MASK] = fake_dup_mask features[rconst.TRAIN_LABEL_KEY] = labels return features
70
88
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """NCF model input pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools # pylint: disable=g-bad-import-order import tensorflow.compat.v2 as tf # pylint: enable=g-bad-import-order from utils.recommendation import constants as rconst from utils.recommendation import movielens from utils.recommendation import data_pipeline NUM_SHARDS = 16 def create_dataset_from_tf_record_files(input_file_pattern, pre_batch_size, batch_size, is_training=True): """Creates dataset from (tf)records files for training/evaluation.""" files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training) def make_dataset(files_dataset, shard_index): """Returns dataset for sharded tf record files.""" if pre_batch_size != batch_size: raise ValueError("Pre-batch ({}) size is not equal to batch " "size ({})".format(pre_batch_size, batch_size)) files_dataset = files_dataset.shard(NUM_SHARDS, shard_index) dataset = files_dataset.interleave(tf.data.TFRecordDataset) decode_fn = functools.partial( data_pipeline.DatasetManager.deserialize, batch_size=pre_batch_size, is_training=is_training) dataset = dataset.map( decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset dataset = tf.data.Dataset.range(NUM_SHARDS) map_fn = functools.partial(make_dataset, files) dataset = dataset.interleave( map_fn, cycle_length=NUM_SHARDS, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) return dataset def create_dataset_from_data_producer(producer, params): """Return dataset online-generating data.""" def preprocess_train_input(features, labels): """Pre-process the training data. This is needed because - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for DUPLICATE_MASK in training data. Args: features: Dictionary of features for training. labels: Training labels. Returns: Processed training features. """ fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN]) features[rconst.DUPLICATE_MASK] = fake_dup_mask features[rconst.TRAIN_LABEL_KEY] = labels return features train_input_fn = producer.make_input_fn(is_training=True) train_input_dataset = train_input_fn(params).map(preprocess_train_input) def preprocess_eval_input(features): """Pre-process the eval data. This is needed because: - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for VALID_PT_MASK in eval data. Args: features: Dictionary of features for evaluation. Returns: Processed evaluation features. 
""" labels = tf.cast(tf.zeros_like( features[movielens.USER_COLUMN]), tf.bool) fake_valid_pt_mask = tf.cast( tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool) features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask features[rconst.TRAIN_LABEL_KEY] = labels return features eval_input_fn = producer.make_input_fn(is_training=False) eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input) return train_input_dataset, eval_input_dataset def create_ncf_input_data(params, producer=None, input_meta_data=None, strategy=None): """Creates NCF training/evaluation dataset. Args: params: Dictionary containing parameters for train/evaluation data. producer: Instance of BaseDataConstructor that generates data online. Must not be None when params['train_dataset_path'] or params['eval_dataset_path'] is not specified. input_meta_data: A dictionary of input metadata to be used when reading data from tf record files. Must be specified when params["train_input_dataset"] is specified. strategy: Distribution strategy used for distributed training. If specified, used to assert that evaluation batch size is correctly a multiple of total number of devices used. Returns: (training dataset, evaluation dataset, train steps per epoch, eval steps per epoch) Raises: ValueError: If data is being generated online for when using TPU's. """ # NCF evaluation metric calculation logic assumes that evaluation data # sample size are in multiples of (1 + number of negative samples in # evaluation) for each device. As so, evaluation batch size must be a # multiple of (number of replicas * (1 + number of negative samples)). num_devices = strategy.num_replicas_in_sync if strategy else 1 if (params["eval_batch_size"] % (num_devices * (1 + rconst.NUM_EVAL_NEGATIVES))): raise ValueError("Evaluation batch size must be divisible by {} " "times {}".format(num_devices, (1 + rconst.NUM_EVAL_NEGATIVES))) if params["train_dataset_path"]: assert params["eval_dataset_path"] train_dataset = create_dataset_from_tf_record_files( params["train_dataset_path"], input_meta_data["train_prebatch_size"], params["batch_size"], is_training=True) eval_dataset = create_dataset_from_tf_record_files( params["eval_dataset_path"], input_meta_data["eval_prebatch_size"], params["eval_batch_size"], is_training=False) num_train_steps = int(input_meta_data["num_train_steps"]) num_eval_steps = int(input_meta_data["num_eval_steps"]) else: if params["use_tpu"]: raise ValueError( "TPU training does not support data producer yet. " "Use pre-processed data.") assert producer # Start retrieving data from producer. train_dataset, eval_dataset = create_dataset_from_data_producer( producer, params) num_train_steps = producer.train_batches_per_epoch num_eval_steps = producer.eval_batches_per_epoch return train_dataset, eval_dataset, num_train_steps, num_eval_steps
preprocess_eval_input
Pre-process the eval data. This is needed because: - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for VALID_PT_MASK in eval data. Args: features: Dictionary of features for evaluation. Returns: Processed evaluation features.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """NCF model input pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools # pylint: disable=g-bad-import-order import tensorflow.compat.v2 as tf # pylint: enable=g-bad-import-order from utils.recommendation import constants as rconst from utils.recommendation import movielens from utils.recommendation import data_pipeline NUM_SHARDS = 16 def create_dataset_from_tf_record_files(input_file_pattern, pre_batch_size, batch_size, is_training=True): """Creates dataset from (tf)records files for training/evaluation.""" files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training) def make_dataset(files_dataset, shard_index): """Returns dataset for sharded tf record files.""" if pre_batch_size != batch_size: raise ValueError("Pre-batch ({}) size is not equal to batch " "size ({})".format(pre_batch_size, batch_size)) files_dataset = files_dataset.shard(NUM_SHARDS, shard_index) dataset = files_dataset.interleave(tf.data.TFRecordDataset) decode_fn = functools.partial( data_pipeline.DatasetManager.deserialize, batch_size=pre_batch_size, is_training=is_training) dataset = dataset.map( decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset dataset = tf.data.Dataset.range(NUM_SHARDS) map_fn = functools.partial(make_dataset, files) dataset = dataset.interleave( map_fn, cycle_length=NUM_SHARDS, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) return dataset def create_dataset_from_data_producer(producer, params): """Return dataset online-generating data.""" def preprocess_train_input(features, labels): """Pre-process the training data. This is needed because - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for DUPLICATE_MASK in training data. Args: features: Dictionary of features for training. labels: Training labels. Returns: Processed training features. """ fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN]) features[rconst.DUPLICATE_MASK] = fake_dup_mask features[rconst.TRAIN_LABEL_KEY] = labels return features train_input_fn = producer.make_input_fn(is_training=True) train_input_dataset = train_input_fn(params).map(preprocess_train_input) # MASKED: preprocess_eval_input function (lines 93-113) eval_input_fn = producer.make_input_fn(is_training=False) eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input) return train_input_dataset, eval_input_dataset def create_ncf_input_data(params, producer=None, input_meta_data=None, strategy=None): """Creates NCF training/evaluation dataset. Args: params: Dictionary containing parameters for train/evaluation data. producer: Instance of BaseDataConstructor that generates data online. 
Must not be None when params['train_dataset_path'] or params['eval_dataset_path'] is not specified. input_meta_data: A dictionary of input metadata to be used when reading data from tf record files. Must be specified when params["train_input_dataset"] is specified. strategy: Distribution strategy used for distributed training. If specified, used to assert that evaluation batch size is correctly a multiple of total number of devices used. Returns: (training dataset, evaluation dataset, train steps per epoch, eval steps per epoch) Raises: ValueError: If data is being generated online for when using TPU's. """ # NCF evaluation metric calculation logic assumes that evaluation data # sample size are in multiples of (1 + number of negative samples in # evaluation) for each device. As so, evaluation batch size must be a # multiple of (number of replicas * (1 + number of negative samples)). num_devices = strategy.num_replicas_in_sync if strategy else 1 if (params["eval_batch_size"] % (num_devices * (1 + rconst.NUM_EVAL_NEGATIVES))): raise ValueError("Evaluation batch size must be divisible by {} " "times {}".format(num_devices, (1 + rconst.NUM_EVAL_NEGATIVES))) if params["train_dataset_path"]: assert params["eval_dataset_path"] train_dataset = create_dataset_from_tf_record_files( params["train_dataset_path"], input_meta_data["train_prebatch_size"], params["batch_size"], is_training=True) eval_dataset = create_dataset_from_tf_record_files( params["eval_dataset_path"], input_meta_data["eval_prebatch_size"], params["eval_batch_size"], is_training=False) num_train_steps = int(input_meta_data["num_train_steps"]) num_eval_steps = int(input_meta_data["num_eval_steps"]) else: if params["use_tpu"]: raise ValueError( "TPU training does not support data producer yet. " "Use pre-processed data.") assert producer # Start retrieving data from producer. train_dataset, eval_dataset = create_dataset_from_data_producer( producer, params) num_train_steps = producer.train_batches_per_epoch num_eval_steps = producer.eval_batches_per_epoch return train_dataset, eval_dataset, num_train_steps, num_eval_steps
def preprocess_eval_input(features): """Pre-process the eval data. This is needed because: - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for VALID_PT_MASK in eval data. Args: features: Dictionary of features for evaluation. Returns: Processed evaluation features. """ labels = tf.cast(tf.zeros_like( features[movielens.USER_COLUMN]), tf.bool) fake_valid_pt_mask = tf.cast( tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool) features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask features[rconst.TRAIN_LABEL_KEY] = labels return features
93
113
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """NCF model input pipeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools # pylint: disable=g-bad-import-order import tensorflow.compat.v2 as tf # pylint: enable=g-bad-import-order from utils.recommendation import constants as rconst from utils.recommendation import movielens from utils.recommendation import data_pipeline NUM_SHARDS = 16 def create_dataset_from_tf_record_files(input_file_pattern, pre_batch_size, batch_size, is_training=True): """Creates dataset from (tf)records files for training/evaluation.""" files = tf.data.Dataset.list_files(input_file_pattern, shuffle=is_training) def make_dataset(files_dataset, shard_index): """Returns dataset for sharded tf record files.""" if pre_batch_size != batch_size: raise ValueError("Pre-batch ({}) size is not equal to batch " "size ({})".format(pre_batch_size, batch_size)) files_dataset = files_dataset.shard(NUM_SHARDS, shard_index) dataset = files_dataset.interleave(tf.data.TFRecordDataset) decode_fn = functools.partial( data_pipeline.DatasetManager.deserialize, batch_size=pre_batch_size, is_training=is_training) dataset = dataset.map( decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) return dataset dataset = tf.data.Dataset.range(NUM_SHARDS) map_fn = functools.partial(make_dataset, files) dataset = dataset.interleave( map_fn, cycle_length=NUM_SHARDS, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) return dataset def create_dataset_from_data_producer(producer, params): """Return dataset online-generating data.""" def preprocess_train_input(features, labels): """Pre-process the training data. This is needed because - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for DUPLICATE_MASK in training data. Args: features: Dictionary of features for training. labels: Training labels. Returns: Processed training features. """ fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN]) features[rconst.DUPLICATE_MASK] = fake_dup_mask features[rconst.TRAIN_LABEL_KEY] = labels return features train_input_fn = producer.make_input_fn(is_training=True) train_input_dataset = train_input_fn(params).map(preprocess_train_input) def preprocess_eval_input(features): """Pre-process the eval data. This is needed because: - The label needs to be extended to be used in the loss fn - We need the same inputs for training and eval so adding fake inputs for VALID_PT_MASK in eval data. Args: features: Dictionary of features for evaluation. Returns: Processed evaluation features. 
""" labels = tf.cast(tf.zeros_like( features[movielens.USER_COLUMN]), tf.bool) fake_valid_pt_mask = tf.cast( tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool) features[rconst.VALID_POINT_MASK] = fake_valid_pt_mask features[rconst.TRAIN_LABEL_KEY] = labels return features eval_input_fn = producer.make_input_fn(is_training=False) eval_input_dataset = eval_input_fn(params).map(preprocess_eval_input) return train_input_dataset, eval_input_dataset def create_ncf_input_data(params, producer=None, input_meta_data=None, strategy=None): """Creates NCF training/evaluation dataset. Args: params: Dictionary containing parameters for train/evaluation data. producer: Instance of BaseDataConstructor that generates data online. Must not be None when params['train_dataset_path'] or params['eval_dataset_path'] is not specified. input_meta_data: A dictionary of input metadata to be used when reading data from tf record files. Must be specified when params["train_input_dataset"] is specified. strategy: Distribution strategy used for distributed training. If specified, used to assert that evaluation batch size is correctly a multiple of total number of devices used. Returns: (training dataset, evaluation dataset, train steps per epoch, eval steps per epoch) Raises: ValueError: If data is being generated online for when using TPU's. """ # NCF evaluation metric calculation logic assumes that evaluation data # sample size are in multiples of (1 + number of negative samples in # evaluation) for each device. As so, evaluation batch size must be a # multiple of (number of replicas * (1 + number of negative samples)). num_devices = strategy.num_replicas_in_sync if strategy else 1 if (params["eval_batch_size"] % (num_devices * (1 + rconst.NUM_EVAL_NEGATIVES))): raise ValueError("Evaluation batch size must be divisible by {} " "times {}".format(num_devices, (1 + rconst.NUM_EVAL_NEGATIVES))) if params["train_dataset_path"]: assert params["eval_dataset_path"] train_dataset = create_dataset_from_tf_record_files( params["train_dataset_path"], input_meta_data["train_prebatch_size"], params["batch_size"], is_training=True) eval_dataset = create_dataset_from_tf_record_files( params["eval_dataset_path"], input_meta_data["eval_prebatch_size"], params["eval_batch_size"], is_training=False) num_train_steps = int(input_meta_data["num_train_steps"]) num_eval_steps = int(input_meta_data["num_eval_steps"]) else: if params["use_tpu"]: raise ValueError( "TPU training does not support data producer yet. " "Use pre-processed data.") assert producer # Start retrieving data from producer. train_dataset, eval_dataset = create_dataset_from_data_producer( producer, params) num_train_steps = producer.train_batches_per_epoch num_eval_steps = producer.eval_batches_per_epoch return train_dataset, eval_dataset, num_train_steps, num_eval_steps
output
Output the given rows in tabular format. Each row is a list of string values. All rows are expected to have the same length. The first row is the table header. Parameters ---------- rows: list(string) List of rows in the table
# Copyright (C) 2017-2019 New York University, # University at Buffalo, # Illinois Institute of Technology. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Abstract class for interpreter commands. Each command has to implement two methods: - eval(list(string)): Given a list of tokens check whether the tokens reference the given command. If yes, evaluate the command and return True. Otherwise, return False. - help: Print a simple help statement """ from abc import abstractmethod from typing import List class Command(object): """Abstract class for interpreter commands.""" @abstractmethod def eval(self, tokens: List[str]) -> bool: """If the given tokens sequence matches the given command execute it and return True. Otherwise, return False. Parameters ---------- tokens: list(string) List of tokens in the command line Returns ------- bool """ raise NotImplementedError() @abstractmethod def help(self) -> None: """Print a simple help statement for the command.""" raise NotImplementedError() # MASKED: output function (lines 52-87)
def output(self, rows): """Output the given rows in tabular format. Each row is a list of string values. All rows are expected to have the same length. The first row is the table header. Parameters ---------- rows: list(string) List of rows in the table """ # Determine the longest value for each column. columns = [0] * len(rows[0]) for row in rows: for col in range(len(columns)): count = len(row[col]) if count > columns[col]: columns[col] = count # Create format string format = None divider = list() for col_len in columns: f = '%-' + str(col_len) + 's' if format is None: format = f else: format += ' | ' + f if len(divider) in [0, len(columns) - 1]: i = 1 else: i = 2 divider.append('-' * (col_len + i)) # Print formatted rows print(format % tuple(rows[0])) print('|'.join(divider)) for row in rows[1:]: print(format % tuple(row))
52
87
# Copyright (C) 2017-2019 New York University, # University at Buffalo, # Illinois Institute of Technology. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Abstract class for interpreter commands. Each command has to implement two methods: - eval(list(string)): Given a list of tokens check whether the tokens reference the given command. If yes, evaluate the command and return True. Otherwise, return False. - help: Print a simple help statement """ from abc import abstractmethod from typing import List class Command(object): """Abstract class for interpreter commands.""" @abstractmethod def eval(self, tokens: List[str]) -> bool: """If the given tokens sequence matches the given command execute it and return True. Otherwise, return False. Parameters ---------- tokens: list(string) List of tokens in the command line Returns ------- bool """ raise NotImplementedError() @abstractmethod def help(self) -> None: """Print a simple help statement for the command.""" raise NotImplementedError() def output(self, rows): """Output the given rows in tabular format. Each row is a list of string values. All rows are expected to have the same length. The first row is the table header. Parameters ---------- rows: list(string) List of rows in the table """ # Determine the longest value for each column. columns = [0] * len(rows[0]) for row in rows: for col in range(len(columns)): count = len(row[col]) if count > columns[col]: columns[col] = count # Create format string format = None divider = list() for col_len in columns: f = '%-' + str(col_len) + 's' if format is None: format = f else: format += ' | ' + f if len(divider) in [0, len(columns) - 1]: i = 1 else: i = 2 divider.append('-' * (col_len + i)) # Print formatted rows print(format % tuple(rows[0])) print('|'.join(divider)) for row in rows[1:]: print(format % tuple(row))
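A quick way to see what Command.output produces is to call it on a small header-plus-rows table. The subclass below is purely illustrative (any concrete Command would do), and it assumes the Command class defined above is in scope:

class EchoCommand(Command):
    """Minimal concrete Command used only to exercise output()."""

    def eval(self, tokens):
        return False

    def help(self):
        print('echo - no-op example command')

rows = [
    ['NAME', 'STATE'],
    ['workflow-1', 'RUNNING'],
    ['workflow-2', 'DONE'],
]
EchoCommand().output(rows)
# NAME       | STATE
# -----------|--------
# workflow-1 | RUNNING
# workflow-2 | DONE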
ProcessFile
Process and write a file of string resources. :param file_name: path to the file to process. :return: None.
# coding=UTF-8 import os import re import sys class BaseStringScript: # State STATE_SEARCHING='STATE_SEARCHING' STATE_IN_STR='STATE_IN_STR' STATE_IN_PLUR='STATE_IN_PLUR' # Tag types TYPE_STR='TYPE_STR' TYPE_PLUR='TYPE_PLUR' # String tag start/end START_STR = '<string' END_STR = '</string' # Plurals tag start/end START_PLUR='<plurals' END_PLUR = '</plurals' def ProcessTag(self, line, type): """ Process a single string tag. :param line: an array of lines making a single string tag. :param type: the tag type, such as TYPE_STR or TYPE_PLUR :return: an array of lines representing the processed tag. """ return line # MASKED: ProcessFile function (lines 36-86) def WriteFile(self, file_name, file_contents): """ Overwrite the contents of a file. :param file_name: path to the file to write. :param file_contents: string containing new file contents. :return: None """ with open(file_name, 'w') as myfile: myfile.write(file_contents)
def ProcessFile(self, file_name): """ Process and write a file of string resources. :param file_name: path to the file to process. :return: None. """ lines = [] state = self.STATE_SEARCHING curr_tag = [] pending_process_type = None with open(file_name, 'r') as myfile: data = myfile.read() for line in data.split('\n'): # Searching for a new tag if state == self.STATE_SEARCHING: if self.START_STR in line: state = self.STATE_IN_STR elif self.START_PLUR in line: state = self.STATE_IN_PLUR else: lines.append(line) # Inside of a string tag if state == self.STATE_IN_STR: curr_tag.append(line) if self.END_STR in line: pending_process_type = self.TYPE_STR # Inside of a plurals tag if state == self.STATE_IN_PLUR: curr_tag.append(line) if self.END_PLUR in line: pending_process_type = self.TYPE_PLUR # Some processing needs doing if pending_process_type: # Do processing lines += self.ProcessTag(curr_tag, pending_process_type) # Reset processing state pending_process_type = None state = self.STATE_SEARCHING curr_tag = [] # Write back to the file self.WriteFile(file_name, '\n'.join(lines))
36
86
# coding=UTF-8 import os import re import sys class BaseStringScript: # State STATE_SEARCHING='STATE_SEARCHING' STATE_IN_STR='STATE_IN_STR' STATE_IN_PLUR='STATE_IN_PLUR' # Tag types TYPE_STR='TYPE_STR' TYPE_PLUR='TYPE_PLUR' # String tag start/end START_STR = '<string' END_STR = '</string' # Plurals tag start/end START_PLUR='<plurals' END_PLUR = '</plurals' def ProcessTag(self, line, type): """ Process a single string tag. :param line: an array of lines making a single string tag. :param type: the tag type, such as TYPE_STR or TYPE_PLUR :return: an array of lines representing the processed tag. """ return line def ProcessFile(self, file_name): """ Process and write a file of string resources. :param file_name: path to the file to process. :return: None. """ lines = [] state = self.STATE_SEARCHING curr_tag = [] pending_process_type = None with open(file_name, 'r') as myfile: data = myfile.read() for line in data.split('\n'): # Searching for a new tag if state == self.STATE_SEARCHING: if self.START_STR in line: state = self.STATE_IN_STR elif self.START_PLUR in line: state = self.STATE_IN_PLUR else: lines.append(line) # Inside of a string tag if state == self.STATE_IN_STR: curr_tag.append(line) if self.END_STR in line: pending_process_type = self.TYPE_STR # Inside of a plurals tag if state == self.STATE_IN_PLUR: curr_tag.append(line) if self.END_PLUR in line: pending_process_type = self.TYPE_PLUR # Some processing needs doing if pending_process_type: # Do processing lines += self.ProcessTag(curr_tag, pending_process_type) # Reset processing state pending_process_type = None state = self.STATE_SEARCHING curr_tag = [] # Write back to the file self.WriteFile(file_name, '\n'.join(lines)) def WriteFile(self, file_name, file_contents): """ Overwrite the contents of a file. :param file_name: path to the file to write. :param file_contents: string containing new file contents. :return: None """ with open(file_name, 'w') as myfile: myfile.write(file_contents)
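ProcessTag is the intended extension point: ProcessFile collects each <string> or <plurals> block and hands it to ProcessTag, so a subclass only needs to transform those blocks. A hedged sketch of such a subclass follows; the quote-escaping rule is made up for illustration and it assumes the BaseStringScript class above is in scope:

import re

class QuoteEscapingScript(BaseStringScript):
    """Illustrative subclass: escape bare apostrophes inside <string> tags."""

    def ProcessTag(self, lines, type):
        if type != self.TYPE_STR:
            return lines  # leave <plurals> blocks untouched
        # Replace ' with \' unless it is already escaped (made-up rule).
        return [re.sub(r"(?<!\\)'", r"\\'", line) for line in lines]

# Hypothetical usage; the path is an example, not a real file in this repo:
# QuoteEscapingScript().ProcessFile('app/src/main/res/values/strings.xml')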
__init__
Args: backbone: either a backbone module or a mmdet config dict that defines a backbone. The backbone takes a 4D image tensor and returns a sequence of tensors. neck: either a backbone module or a mmdet config dict that defines a neck. The neck takes outputs of backbone and returns a sequence of tensors. If None, no neck is used. pretrained_backbone: defines the backbone weights that can be loaded by mmdet, such as "torchvision://resnet50". output_shapes: shape for every output of the backbone (or neck, if given). stride and channels are often needed. output_names: names for every output of the backbone (or neck, if given). By default, will use "out0", "out1", ...
# -*- coding: utf-8 -*- import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone logger = logging.getLogger(__name__) def _to_container(cfg): """ mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. """ if isinstance(cfg, DictConfig): cfg = OmegaConf.to_container(cfg, resolve=True) from mmcv.utils import ConfigDict return ConfigDict(cfg) class MMDetBackbone(Backbone): """ Wrapper of mmdetection backbones to use in detectron2. mmdet backbones produce list/tuple of tensors, while detectron2 backbones produce a dict of tensors. This class wraps the given backbone to produce output in detectron2's convention, so it can be used in place of detectron2 backbones. """ # MASKED: __init__ function (lines 44-103) def forward(self, x) -> Dict[str, Tensor]: outs = self.backbone(x) if self.neck is not None: outs = self.neck(outs) assert isinstance( outs, (list, tuple) ), "mmdet backbone should return a list/tuple of tensors!" if len(outs) != len(self._output_shapes): raise ValueError( "Length of output_shapes does not match outputs from the mmdet backbone: " f"{len(outs)} != {len(self._output_shapes)}" ) return {k: v for k, v in zip(self._output_names, outs)} def output_shape(self) -> Dict[str, ShapeSpec]: return {k: v for k, v in zip(self._output_names, self._output_shapes)} class MMDetDetector(nn.Module): """ Wrapper of a mmdetection detector model, for detection and instance segmentation. Input/output formats of this class follow detectron2's convention, so a mmdetection model can be trained and evaluated in detectron2. """ def __init__( self, detector: Union[nn.Module, Mapping], *, # Default is 32 regardless of model: # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: detector: a mmdet detector, or a mmdet config dict that defines a detector. size_divisibility: pad input images to multiple of this number pixel_mean: per-channel mean to normalize input image pixel_std: per-channel stddev to normalize input image """ super().__init__() if isinstance(detector, Mapping): from mmdet.models import build_detector detector = build_detector(_to_container(detector)) self.detector = detector self.size_divisibility = size_divisibility self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) assert ( self.pixel_mean.shape == self.pixel_std.shape ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" 
def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]): images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor metas = [] rescale = {"height" in x for x in batched_inputs} if len(rescale) != 1: raise ValueError("Some inputs have original height/width, but some don't!") rescale = list(rescale)[0] output_shapes = [] for input in batched_inputs: meta = {} c, h, w = input["image"].shape meta["img_shape"] = meta["ori_shape"] = (h, w, c) if rescale: scale_factor = np.sqrt(h * w / (input["height"] * input["width"])) ori_shape = (input["height"], input["width"]) output_shapes.append(ori_shape) meta["ori_shape"] = ori_shape + (c,) else: scale_factor = 1.0 output_shapes.append((h, w)) meta["scale_factor"] = scale_factor meta["flip"] = False padh, padw = images.shape[-2:] meta["pad_shape"] = (padh, padw, c) metas.append(meta) if self.training: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] if gt_instances[0].has("gt_masks"): from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks def convert_mask(m, shape): # mmdet mask format if isinstance(m, BitMasks): return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) else: return mm_PolygonMasks(m.polygons, shape[0], shape[1]) gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] else: gt_masks = None losses_and_metrics = self.detector.forward_train( images, metas, [x.gt_boxes.tensor for x in gt_instances], [x.gt_classes for x in gt_instances], gt_masks=gt_masks, ) return _parse_losses(losses_and_metrics) else: results = self.detector.simple_test(images, metas, rescale=rescale) results = [ {"instances": _convert_mmdet_result(r, shape)} for r, shape in zip(results, output_shapes) ] return results @property def device(self): return self.pixel_mean.device # Reference: show_result() in # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] else: bbox_result, segm_result = result, None bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 bboxes, scores = bboxes[:, :4], bboxes[:, -1] labels = [ torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) ] labels = torch.cat(labels) inst = Instances(shape) inst.pred_boxes = Boxes(bboxes) inst.scores = scores inst.pred_classes = labels if segm_result is not None and len(labels) > 0: segm_result = list(itertools.chain(*segm_result)) segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] segm_result = torch.stack(segm_result, dim=0) inst.pred_masks = segm_result return inst # reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError(f"{loss_name} is not a tensor or list of tensors") if "loss" not in loss_name: # put metrics to storage; don't return them storage = get_event_storage() value = 
log_vars.pop(loss_name).cpu().item() storage.put_scalar(loss_name, value) return log_vars
def __init__( self, backbone: Union[nn.Module, Mapping], neck: Union[nn.Module, Mapping, None] = None, *, pretrained_backbone: Optional[str] = None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]] = None, ): """ Args: backbone: either a backbone module or a mmdet config dict that defines a backbone. The backbone takes a 4D image tensor and returns a sequence of tensors. neck: either a backbone module or a mmdet config dict that defines a neck. The neck takes outputs of backbone and returns a sequence of tensors. If None, no neck is used. pretrained_backbone: defines the backbone weights that can be loaded by mmdet, such as "torchvision://resnet50". output_shapes: shape for every output of the backbone (or neck, if given). stride and channels are often needed. output_names: names for every output of the backbone (or neck, if given). By default, will use "out0", "out1", ... """ super().__init__() if isinstance(backbone, Mapping): from mmdet.models import build_backbone backbone = build_backbone(_to_container(backbone)) self.backbone = backbone if isinstance(neck, Mapping): from mmdet.models import build_neck neck = build_neck(_to_container(neck)) self.neck = neck # It's confusing that backbone weights are given as a separate argument, # but "neck" weights, if any, are part of neck itself. This is the interface # of mmdet so we follow it. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...") self.backbone.init_weights(pretrained_backbone) # train() in mmdet modules is non-trivial, and has to be explicitly # called. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py self.backbone.train() if self.neck is not None: logger.info("Initializing mmdet neck weights ...") if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.neck.train() self._output_shapes = output_shapes if not output_names: output_names = [f"out{i}" for i in range(len(output_shapes))] self._output_names = output_names
44
103
# -*- coding: utf-8 -*- import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone logger = logging.getLogger(__name__) def _to_container(cfg): """ mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. """ if isinstance(cfg, DictConfig): cfg = OmegaConf.to_container(cfg, resolve=True) from mmcv.utils import ConfigDict return ConfigDict(cfg) class MMDetBackbone(Backbone): """ Wrapper of mmdetection backbones to use in detectron2. mmdet backbones produce list/tuple of tensors, while detectron2 backbones produce a dict of tensors. This class wraps the given backbone to produce output in detectron2's convention, so it can be used in place of detectron2 backbones. """ def __init__( self, backbone: Union[nn.Module, Mapping], neck: Union[nn.Module, Mapping, None] = None, *, pretrained_backbone: Optional[str] = None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]] = None, ): """ Args: backbone: either a backbone module or a mmdet config dict that defines a backbone. The backbone takes a 4D image tensor and returns a sequence of tensors. neck: either a backbone module or a mmdet config dict that defines a neck. The neck takes outputs of backbone and returns a sequence of tensors. If None, no neck is used. pretrained_backbone: defines the backbone weights that can be loaded by mmdet, such as "torchvision://resnet50". output_shapes: shape for every output of the backbone (or neck, if given). stride and channels are often needed. output_names: names for every output of the backbone (or neck, if given). By default, will use "out0", "out1", ... """ super().__init__() if isinstance(backbone, Mapping): from mmdet.models import build_backbone backbone = build_backbone(_to_container(backbone)) self.backbone = backbone if isinstance(neck, Mapping): from mmdet.models import build_neck neck = build_neck(_to_container(neck)) self.neck = neck # It's confusing that backbone weights are given as a separate argument, # but "neck" weights, if any, are part of neck itself. This is the interface # of mmdet so we follow it. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...") self.backbone.init_weights(pretrained_backbone) # train() in mmdet modules is non-trivial, and has to be explicitly # called. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py self.backbone.train() if self.neck is not None: logger.info("Initializing mmdet neck weights ...") if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.neck.train() self._output_shapes = output_shapes if not output_names: output_names = [f"out{i}" for i in range(len(output_shapes))] self._output_names = output_names def forward(self, x) -> Dict[str, Tensor]: outs = self.backbone(x) if self.neck is not None: outs = self.neck(outs) assert isinstance( outs, (list, tuple) ), "mmdet backbone should return a list/tuple of tensors!" 
if len(outs) != len(self._output_shapes): raise ValueError( "Length of output_shapes does not match outputs from the mmdet backbone: " f"{len(outs)} != {len(self._output_shapes)}" ) return {k: v for k, v in zip(self._output_names, outs)} def output_shape(self) -> Dict[str, ShapeSpec]: return {k: v for k, v in zip(self._output_names, self._output_shapes)} class MMDetDetector(nn.Module): """ Wrapper of a mmdetection detector model, for detection and instance segmentation. Input/output formats of this class follow detectron2's convention, so a mmdetection model can be trained and evaluated in detectron2. """ def __init__( self, detector: Union[nn.Module, Mapping], *, # Default is 32 regardless of model: # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: detector: a mmdet detector, or a mmdet config dict that defines a detector. size_divisibility: pad input images to multiple of this number pixel_mean: per-channel mean to normalize input image pixel_std: per-channel stddev to normalize input image """ super().__init__() if isinstance(detector, Mapping): from mmdet.models import build_detector detector = build_detector(_to_container(detector)) self.detector = detector self.size_divisibility = size_divisibility self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) assert ( self.pixel_mean.shape == self.pixel_std.shape ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]): images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor metas = [] rescale = {"height" in x for x in batched_inputs} if len(rescale) != 1: raise ValueError("Some inputs have original height/width, but some don't!") rescale = list(rescale)[0] output_shapes = [] for input in batched_inputs: meta = {} c, h, w = input["image"].shape meta["img_shape"] = meta["ori_shape"] = (h, w, c) if rescale: scale_factor = np.sqrt(h * w / (input["height"] * input["width"])) ori_shape = (input["height"], input["width"]) output_shapes.append(ori_shape) meta["ori_shape"] = ori_shape + (c,) else: scale_factor = 1.0 output_shapes.append((h, w)) meta["scale_factor"] = scale_factor meta["flip"] = False padh, padw = images.shape[-2:] meta["pad_shape"] = (padh, padw, c) metas.append(meta) if self.training: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] if gt_instances[0].has("gt_masks"): from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks def convert_mask(m, shape): # mmdet mask format if isinstance(m, BitMasks): return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) else: return mm_PolygonMasks(m.polygons, shape[0], shape[1]) gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] else: gt_masks = None losses_and_metrics = self.detector.forward_train( images, metas, [x.gt_boxes.tensor for x in gt_instances], [x.gt_classes for x in gt_instances], gt_masks=gt_masks, ) return _parse_losses(losses_and_metrics) else: results = self.detector.simple_test(images, metas, rescale=rescale) results = [ {"instances": _convert_mmdet_result(r, shape)} for r, shape in zip(results, output_shapes) ] return results @property def 
device(self): return self.pixel_mean.device # Reference: show_result() in # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] else: bbox_result, segm_result = result, None bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 bboxes, scores = bboxes[:, :4], bboxes[:, -1] labels = [ torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) ] labels = torch.cat(labels) inst = Instances(shape) inst.pred_boxes = Boxes(bboxes) inst.scores = scores inst.pred_classes = labels if segm_result is not None and len(labels) > 0: segm_result = list(itertools.chain(*segm_result)) segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] segm_result = torch.stack(segm_result, dim=0) inst.pred_masks = segm_result return inst # reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError(f"{loss_name} is not a tensor or list of tensors") if "loss" not in loss_name: # put metrics to storage; don't return them storage = get_event_storage() value = log_vars.pop(loss_name).cpu().item() storage.put_scalar(loss_name, value) return log_vars
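For orientation, a hedged sketch of constructing MMDetBackbone from mmdet-style config dicts. The ResNet-50/FPN settings, strides, and channel counts below are typical values chosen for illustration, not taken from this file, and the sketch assumes mmdet is installed and that the MMDetBackbone wrapper above is importable:

from detectron2.layers import ShapeSpec

# Illustrative mmdet-style config dicts; plain dicts are accepted because
# MMDetBackbone converts Mapping inputs with build_backbone/build_neck.
backbone_cfg = dict(
    type="ResNet", depth=50, num_stages=4, out_indices=(1, 2, 3),
)
neck_cfg = dict(
    type="FPN", in_channels=[512, 1024, 2048], out_channels=256, num_outs=3,
)

backbone = MMDetBackbone(
    backbone=backbone_cfg,
    neck=neck_cfg,
    pretrained_backbone="torchvision://resnet50",
    output_shapes=[ShapeSpec(channels=256, stride=s) for s in (8, 16, 32)],
    output_names=["p3", "p4", "p5"],
)
print(backbone.output_shape())  # {"p3": ShapeSpec(...), "p4": ..., "p5": ...}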
__init__
Args: detector: a mmdet detector, or a mmdet config dict that defines a detector. size_divisibility: pad input images to multiple of this number pixel_mean: per-channel mean to normalize input image pixel_std: per-channel stddev to normalize input image
# -*- coding: utf-8 -*- import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone logger = logging.getLogger(__name__) def _to_container(cfg): """ mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. """ if isinstance(cfg, DictConfig): cfg = OmegaConf.to_container(cfg, resolve=True) from mmcv.utils import ConfigDict return ConfigDict(cfg) class MMDetBackbone(Backbone): """ Wrapper of mmdetection backbones to use in detectron2. mmdet backbones produce list/tuple of tensors, while detectron2 backbones produce a dict of tensors. This class wraps the given backbone to produce output in detectron2's convention, so it can be used in place of detectron2 backbones. """ def __init__( self, backbone: Union[nn.Module, Mapping], neck: Union[nn.Module, Mapping, None] = None, *, pretrained_backbone: Optional[str] = None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]] = None, ): """ Args: backbone: either a backbone module or a mmdet config dict that defines a backbone. The backbone takes a 4D image tensor and returns a sequence of tensors. neck: either a backbone module or a mmdet config dict that defines a neck. The neck takes outputs of backbone and returns a sequence of tensors. If None, no neck is used. pretrained_backbone: defines the backbone weights that can be loaded by mmdet, such as "torchvision://resnet50". output_shapes: shape for every output of the backbone (or neck, if given). stride and channels are often needed. output_names: names for every output of the backbone (or neck, if given). By default, will use "out0", "out1", ... """ super().__init__() if isinstance(backbone, Mapping): from mmdet.models import build_backbone backbone = build_backbone(_to_container(backbone)) self.backbone = backbone if isinstance(neck, Mapping): from mmdet.models import build_neck neck = build_neck(_to_container(neck)) self.neck = neck # It's confusing that backbone weights are given as a separate argument, # but "neck" weights, if any, are part of neck itself. This is the interface # of mmdet so we follow it. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...") self.backbone.init_weights(pretrained_backbone) # train() in mmdet modules is non-trivial, and has to be explicitly # called. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py self.backbone.train() if self.neck is not None: logger.info("Initializing mmdet neck weights ...") if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.neck.train() self._output_shapes = output_shapes if not output_names: output_names = [f"out{i}" for i in range(len(output_shapes))] self._output_names = output_names def forward(self, x) -> Dict[str, Tensor]: outs = self.backbone(x) if self.neck is not None: outs = self.neck(outs) assert isinstance( outs, (list, tuple) ), "mmdet backbone should return a list/tuple of tensors!" 
if len(outs) != len(self._output_shapes): raise ValueError( "Length of output_shapes does not match outputs from the mmdet backbone: " f"{len(outs)} != {len(self._output_shapes)}" ) return {k: v for k, v in zip(self._output_names, outs)} def output_shape(self) -> Dict[str, ShapeSpec]: return {k: v for k, v in zip(self._output_names, self._output_shapes)} class MMDetDetector(nn.Module): """ Wrapper of a mmdetection detector model, for detection and instance segmentation. Input/output formats of this class follow detectron2's convention, so a mmdetection model can be trained and evaluated in detectron2. """ # MASKED: __init__ function (lines 130-159) def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]): images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor metas = [] rescale = {"height" in x for x in batched_inputs} if len(rescale) != 1: raise ValueError("Some inputs have original height/width, but some don't!") rescale = list(rescale)[0] output_shapes = [] for input in batched_inputs: meta = {} c, h, w = input["image"].shape meta["img_shape"] = meta["ori_shape"] = (h, w, c) if rescale: scale_factor = np.sqrt(h * w / (input["height"] * input["width"])) ori_shape = (input["height"], input["width"]) output_shapes.append(ori_shape) meta["ori_shape"] = ori_shape + (c,) else: scale_factor = 1.0 output_shapes.append((h, w)) meta["scale_factor"] = scale_factor meta["flip"] = False padh, padw = images.shape[-2:] meta["pad_shape"] = (padh, padw, c) metas.append(meta) if self.training: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] if gt_instances[0].has("gt_masks"): from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks def convert_mask(m, shape): # mmdet mask format if isinstance(m, BitMasks): return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) else: return mm_PolygonMasks(m.polygons, shape[0], shape[1]) gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] else: gt_masks = None losses_and_metrics = self.detector.forward_train( images, metas, [x.gt_boxes.tensor for x in gt_instances], [x.gt_classes for x in gt_instances], gt_masks=gt_masks, ) return _parse_losses(losses_and_metrics) else: results = self.detector.simple_test(images, metas, rescale=rescale) results = [ {"instances": _convert_mmdet_result(r, shape)} for r, shape in zip(results, output_shapes) ] return results @property def device(self): return self.pixel_mean.device # Reference: show_result() in # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] else: bbox_result, segm_result = result, None bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 bboxes, scores = bboxes[:, :4], bboxes[:, -1] labels = [ torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) ] labels = torch.cat(labels) inst = Instances(shape) inst.pred_boxes = Boxes(bboxes) inst.scores = scores inst.pred_classes = labels if segm_result is not None and len(labels) > 0: segm_result = list(itertools.chain(*segm_result)) segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] segm_result = torch.stack(segm_result, 
dim=0) inst.pred_masks = segm_result return inst # reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError(f"{loss_name} is not a tensor or list of tensors") if "loss" not in loss_name: # put metrics to storage; don't return them storage = get_event_storage() value = log_vars.pop(loss_name).cpu().item() storage.put_scalar(loss_name, value) return log_vars
def __init__( self, detector: Union[nn.Module, Mapping], *, # Default is 32 regardless of model: # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: detector: a mmdet detector, or a mmdet config dict that defines a detector. size_divisibility: pad input images to multiple of this number pixel_mean: per-channel mean to normalize input image pixel_std: per-channel stddev to normalize input image """ super().__init__() if isinstance(detector, Mapping): from mmdet.models import build_detector detector = build_detector(_to_container(detector)) self.detector = detector self.size_divisibility = size_divisibility self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) assert ( self.pixel_mean.shape == self.pixel_std.shape ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
130
159
# -*- coding: utf-8 -*- import itertools import logging import numpy as np from collections import OrderedDict from collections.abc import Mapping from typing import Dict, List, Optional, Tuple, Union import torch from omegaconf import DictConfig, OmegaConf from torch import Tensor, nn from detectron2.layers import ShapeSpec from detectron2.structures import BitMasks, Boxes, ImageList, Instances from detectron2.utils.events import get_event_storage from .backbone import Backbone logger = logging.getLogger(__name__) def _to_container(cfg): """ mmdet will assert the type of dict/list. So convert omegaconf objects to dict/list. """ if isinstance(cfg, DictConfig): cfg = OmegaConf.to_container(cfg, resolve=True) from mmcv.utils import ConfigDict return ConfigDict(cfg) class MMDetBackbone(Backbone): """ Wrapper of mmdetection backbones to use in detectron2. mmdet backbones produce list/tuple of tensors, while detectron2 backbones produce a dict of tensors. This class wraps the given backbone to produce output in detectron2's convention, so it can be used in place of detectron2 backbones. """ def __init__( self, backbone: Union[nn.Module, Mapping], neck: Union[nn.Module, Mapping, None] = None, *, pretrained_backbone: Optional[str] = None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]] = None, ): """ Args: backbone: either a backbone module or a mmdet config dict that defines a backbone. The backbone takes a 4D image tensor and returns a sequence of tensors. neck: either a backbone module or a mmdet config dict that defines a neck. The neck takes outputs of backbone and returns a sequence of tensors. If None, no neck is used. pretrained_backbone: defines the backbone weights that can be loaded by mmdet, such as "torchvision://resnet50". output_shapes: shape for every output of the backbone (or neck, if given). stride and channels are often needed. output_names: names for every output of the backbone (or neck, if given). By default, will use "out0", "out1", ... """ super().__init__() if isinstance(backbone, Mapping): from mmdet.models import build_backbone backbone = build_backbone(_to_container(backbone)) self.backbone = backbone if isinstance(neck, Mapping): from mmdet.models import build_neck neck = build_neck(_to_container(neck)) self.neck = neck # It's confusing that backbone weights are given as a separate argument, # but "neck" weights, if any, are part of neck itself. This is the interface # of mmdet so we follow it. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py logger.info(f"Initializing mmdet backbone weights: {pretrained_backbone} ...") self.backbone.init_weights(pretrained_backbone) # train() in mmdet modules is non-trivial, and has to be explicitly # called. Reference: # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py self.backbone.train() if self.neck is not None: logger.info("Initializing mmdet neck weights ...") if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.neck.train() self._output_shapes = output_shapes if not output_names: output_names = [f"out{i}" for i in range(len(output_shapes))] self._output_names = output_names def forward(self, x) -> Dict[str, Tensor]: outs = self.backbone(x) if self.neck is not None: outs = self.neck(outs) assert isinstance( outs, (list, tuple) ), "mmdet backbone should return a list/tuple of tensors!" 
if len(outs) != len(self._output_shapes): raise ValueError( "Length of output_shapes does not match outputs from the mmdet backbone: " f"{len(outs)} != {len(self._output_shapes)}" ) return {k: v for k, v in zip(self._output_names, outs)} def output_shape(self) -> Dict[str, ShapeSpec]: return {k: v for k, v in zip(self._output_names, self._output_shapes)} class MMDetDetector(nn.Module): """ Wrapper of a mmdetection detector model, for detection and instance segmentation. Input/output formats of this class follow detectron2's convention, so a mmdetection model can be trained and evaluated in detectron2. """ def __init__( self, detector: Union[nn.Module, Mapping], *, # Default is 32 regardless of model: # https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float], ): """ Args: detector: a mmdet detector, or a mmdet config dict that defines a detector. size_divisibility: pad input images to multiple of this number pixel_mean: per-channel mean to normalize input image pixel_std: per-channel stddev to normalize input image """ super().__init__() if isinstance(detector, Mapping): from mmdet.models import build_detector detector = build_detector(_to_container(detector)) self.detector = detector self.size_divisibility = size_divisibility self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False) assert ( self.pixel_mean.shape == self.pixel_std.shape ), f"{self.pixel_mean} and {self.pixel_std} have different shapes!" def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]]): images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, size_divisibility=self.size_divisibility).tensor metas = [] rescale = {"height" in x for x in batched_inputs} if len(rescale) != 1: raise ValueError("Some inputs have original height/width, but some don't!") rescale = list(rescale)[0] output_shapes = [] for input in batched_inputs: meta = {} c, h, w = input["image"].shape meta["img_shape"] = meta["ori_shape"] = (h, w, c) if rescale: scale_factor = np.sqrt(h * w / (input["height"] * input["width"])) ori_shape = (input["height"], input["width"]) output_shapes.append(ori_shape) meta["ori_shape"] = ori_shape + (c,) else: scale_factor = 1.0 output_shapes.append((h, w)) meta["scale_factor"] = scale_factor meta["flip"] = False padh, padw = images.shape[-2:] meta["pad_shape"] = (padh, padw, c) metas.append(meta) if self.training: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] if gt_instances[0].has("gt_masks"): from mmdet.core import PolygonMasks as mm_PolygonMasks, BitmapMasks as mm_BitMasks def convert_mask(m, shape): # mmdet mask format if isinstance(m, BitMasks): return mm_BitMasks(m.tensor.cpu().numpy(), shape[0], shape[1]) else: return mm_PolygonMasks(m.polygons, shape[0], shape[1]) gt_masks = [convert_mask(x.gt_masks, x.image_size) for x in gt_instances] else: gt_masks = None losses_and_metrics = self.detector.forward_train( images, metas, [x.gt_boxes.tensor for x in gt_instances], [x.gt_classes for x in gt_instances], gt_masks=gt_masks, ) return _parse_losses(losses_and_metrics) else: results = self.detector.simple_test(images, metas, rescale=rescale) results = [ {"instances": _convert_mmdet_result(r, shape)} for r, shape in zip(results, output_shapes) ] return results @property def 
device(self): return self.pixel_mean.device # Reference: show_result() in # https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _convert_mmdet_result(result, shape: Tuple[int, int]) -> Instances: if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] else: bbox_result, segm_result = result, None bboxes = torch.from_numpy(np.vstack(bbox_result)) # Nx5 bboxes, scores = bboxes[:, :4], bboxes[:, -1] labels = [ torch.full((bbox.shape[0],), i, dtype=torch.int32) for i, bbox in enumerate(bbox_result) ] labels = torch.cat(labels) inst = Instances(shape) inst.pred_boxes = Boxes(bboxes) inst.scores = scores inst.pred_classes = labels if segm_result is not None and len(labels) > 0: segm_result = list(itertools.chain(*segm_result)) segm_result = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in segm_result] segm_result = torch.stack(segm_result, dim=0) inst.pred_masks = segm_result return inst # reference: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/base.py def _parse_losses(losses: Dict[str, Tensor]) -> Dict[str, Tensor]: log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): log_vars[loss_name] = loss_value.mean() elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: raise TypeError(f"{loss_name} is not a tensor or list of tensors") if "loss" not in loss_name: # put metrics to storage; don't return them storage = get_event_storage() value = log_vars.pop(loss_name).cpu().item() storage.put_scalar(loss_name, value) return log_vars
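Before moving on, a hedged sketch of constructing the MMDetBackbone wrapper documented above may also help; the ResNet/FPN config values are abbreviated placeholders, and the output names and strides are assumptions for a typical 4-level FPN.

# Hypothetical: wrap a mmdet ResNet-50 + FPN as a detectron2 backbone.
# The config dicts are abbreviated; a real mmdet config carries more keys.
backbone = MMDetBackbone(
    backbone={"type": "ResNet", "depth": 50, "out_indices": (0, 1, 2, 3)},
    neck={"type": "FPN", "in_channels": [256, 512, 1024, 2048],
          "out_channels": 256, "num_outs": 4},
    pretrained_backbone="torchvision://resnet50",
    output_shapes=[ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32)],
    output_names=["p2", "p3", "p4", "p5"],
)
feats = backbone(torch.rand(1, 3, 224, 224))  # dict of tensors keyed by "p2".."p5"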
plot_hpvm_configs
Plot the QoS-speedup information in an HPVM configuration file. It is recommended to profile the config file first (using `profile_configs`) to obtain real speedup numbers. This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it. :param config_path: Path to the config file (HPVM configuration format). :param save_to: File to save the figure into. Default is None: don't save the figure (just return it). :param show_qos_loss: Show the QoS loss on the x axis of the figure. Defaults to True. If False, will use (absolute) QoS instead of QoS loss. :param fig_kwargs: Arguments to pass to `plt.subplots`.
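A short, hedged usage sketch of this function; the file names here are hypothetical placeholders.

# Hypothetical usage: plot QoS loss vs. speedup from an already-profiled config file.
fig = plot_hpvm_configs(
    "profiled_confs.txt",       # placeholder path to an HPVM config file
    save_to="qos_speedup.png",  # also saves the figure at 300 dpi
    show_qos_loss=True,         # x axis shows QoS loss instead of absolute QoS
    figsize=(6, 4),             # forwarded to plt.subplots via **fig_kwargs
)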
from pathlib import Path from subprocess import PIPE, CalledProcessError from typing import Iterable, List, Tuple, Union import matplotlib.pyplot as plt PathLike = Union[Path, str] conf_opening, conf_closing = "+++++", "-----" def profile_config_file( binary_path: PathLike, config_path: PathLike, output_config_path: PathLike, progress_bar: bool = True, profile_filename: str = "profile_info.txt", qos_filename: str = "final_accuracy", ) -> None: r"""Profile an HPVM configuration file with an HPVM binary, and write the updated configuration file to a given location. The configuration file must have the baseline as the first configuration. :param binary_path: Path to binary to be executed in profiling. :param config_path: Path to config file (HPVM configuration format) with configs to enumerate for profiling. :param output_config_path: Path where the output configs are written. The output config file has the same configs as the input `config_path` file, but the performance and energy readings are updated. :param progress_bar: If `True`, show a progress bar for number of configs already profiled. :param profile_filename: Name of profile file generated by the binary (in current directory). This defaults to "profile_info.txt" and should not be changed for HPVM binaries. :param qos_filename: Name of QoS file generated by the binary (in current directory). It contains a single float number as the QoS of this run. This defaults to "final_accuracy" and should not be changed for HPVM binaries. """ # Read first line ("the float") and configs in config file header, configs = read_hpvm_configs(Path(config_path)) if not configs: raise ValueError("Config file with no configs is unsupported.") # Modifies configs in place. profile_configs( binary_path, configs[1:], configs[0], progress_bar, profile_filename, qos_filename, ) write_hpvm_configs(header, configs, Path(output_config_path)) def profile_configs( binary_path: PathLike, configs: Iterable["Config"], baseline_config: "Config", progress_bar: bool = True, profile_filename: str = "profile_info.txt", qos_filename: str = "final_accuracy", ) -> None: """Profile a sequence of HPVM configs. This function modifies argument `configs` in place.""" from tqdm import tqdm baseline_time, baseline_acc = measure_config(binary_path, baseline_config) iterable = tqdm(configs, desc="Configs profiled") if progress_bar else configs for config in iterable: time, acc = measure_config(binary_path, config, profile_filename, qos_filename) speedup = baseline_time / time config.update_profile_results(speedup, acc, baseline_acc) return configs def measure_config( binary_path: PathLike, config: "Config", profile_filename: str = "profile_info.txt", qos_filename: str = "final_accuracy", ): from subprocess import check_call from tempfile import NamedTemporaryFile import os temp_file = NamedTemporaryFile("w") write_hpvm_configs("0.0", [config], Path(temp_file.name)) # Run binary_path binary, # which generates `profile_filename` and `qos_filename` file in cwd. 
try: with open(os.devnull, "w") as f: check_call([str(binary_path), "-c", str(temp_file.name)], stdout=f) except CalledProcessError as e: print("Output from the program:") print(e.output) raise e time = _read_profile_file(Path(profile_filename)) acc = _read_qos_file(Path(qos_filename)) temp_file.close() return time, acc # MASKED: plot_hpvm_configs function (lines 101-132) class Config: def __init__( self, conf_name: str, speedup: float, energy: float, qos: float, qos_loss: float, config_body: List[str], ): self.conf_name = conf_name self.speedup = speedup self.energy = energy self.qos = qos self.qos_loss = qos_loss # We don't care about the information in this part, and we don't parse this. self.config_body = config_body def update_profile_results(self, speedup: float, qos: float, base_qos: float): recorded_base_qos = self.qos + self.qos_loss if abs(recorded_base_qos - base_qos) > 0.025: raise ValueError( f"Baseline QoS mismatch. Original: {recorded_base_qos}, measured: {base_qos}" ) self.speedup = speedup self.qos = qos self.qos_loss = base_qos - qos def __repr__(self) -> str: header_fields = [ self.conf_name, self.speedup, self.energy, self.qos, self.qos_loss, ] header = " ".join(str(field) for field in header_fields) lines = [conf_opening, header, *self.config_body, conf_closing] return "\n".join(lines) __str__ = __repr__ def read_hpvm_configs(config_file: PathLike) -> Tuple[str, List[Config]]: # def read_hpvm_configs(config_file, config_num, temp_file): ret_configs = [] with open(config_file) as f: text = f.read() # There's 1 float sitting on the first line of config file. # We don't use it, but want to keep that intact. header, *configs = text.split(conf_opening) header = header.strip() for config_text in configs: config_text = config_text.replace(conf_closing, "").strip() config_header, *config_body = config_text.splitlines() conf_name, *number_fields = config_header.split(" ") speedup, energy, qos, qos_drop = [float(s) for s in number_fields] ret_configs.append( Config(conf_name, speedup, energy, qos, qos_drop, config_body) ) return header, ret_configs def write_hpvm_configs(header: str, configs: Iterable[Config], to_file: PathLike): text_segs = [header] + [str(config) for config in configs] with open(to_file, "w") as f: f.write("\n".join(text_segs)) f.flush() def _read_profile_file(profile_file_path: Path): with profile_file_path.open() as f: target_lines = [line.strip() for line in f if "Total Time" in line] if len(target_lines) != 1: raise RuntimeError(f"Profile {profile_file_path} malformed") (target_line,) = target_lines return float(target_line.split()[3]) def _read_qos_file(qos_file_path: Path): with qos_file_path.open() as f: return float(f.read().strip())
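As a hedged sketch of how the profiling entry point above might be invoked (the binary and file names are hypothetical placeholders):

# Hypothetical invocation: re-measure speedups for every config in a file.
profile_config_file(
    binary_path="./hpvm_binary",              # placeholder HPVM binary
    config_path="tuner_confs.txt",            # input configs; first entry must be the baseline
    output_config_path="profiled_confs.txt",  # same configs with updated speedup/QoS readings
    progress_bar=True,                        # tqdm bar over the non-baseline configs
)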
def plot_hpvm_configs( config_path: PathLike, save_to: PathLike = None, show_qos_loss: bool = True, **fig_kwargs, ) -> plt.Figure: """ Plot the QoS-speedup information in an HPVM configuration file. It is recommended to profile the config file first (using `profile_configs`) to obtain real speedup numbers. This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it. :param config_path: Path to the config file (HPVM configuration format). :param save_to: File to save figure into. Default is None: don't save figure (just return it). :param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True. If False, will use (absolute) QoS instead of QoS loss. :param fig_kwargs: Arguments to pass to `plt.subplots`. """ import numpy as np _, configs = read_hpvm_configs(config_path) get_qos = lambda c: c.qos_loss if show_qos_loss else c.qos qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs]) qoses, speedups = qos_speedup.T fig, ax = plt.subplots(**fig_kwargs) ax.scatter(qoses, speedups) ax.set_xlabel("QoS Loss") ax.set_ylabel("Speedup (X)") if save_to: fig.savefig(save_to, dpi=300) return fig
101
132
from pathlib import Path from subprocess import PIPE, CalledProcessError from typing import Iterable, List, Tuple, Union import matplotlib.pyplot as plt PathLike = Union[Path, str] conf_opening, conf_closing = "+++++", "-----" def profile_config_file( binary_path: PathLike, config_path: PathLike, output_config_path: PathLike, progress_bar: bool = True, profile_filename: str = "profile_info.txt", qos_filename: str = "final_accuracy", ) -> None: r"""Profile an HPVM configuration file with an HPVM binary, and write the updated configuration file to a given location. The configuration file must have the baseline as the first configuration. :param binary_path: Path to binary to be executed in profiling. :param config_path: Path to config file (HPVM configuration format) with configs to enumerate for profiling. :param output_config_path: Path where the output configs are written. The output config file has the same configs as the input `config_path` file, but the performance and energy readings are updated. :param progress_bar: If `True`, show a progress bar for number of configs already profiled. :param profile_filename: Name of profile file generated by the binary (in current directory). This defaults to "profile_info.txt" and should not be changed for HPVM binaries. :param qos_filename: Name of QoS file generated by the binary (in current directory). It contains a single float number as the QoS of this run. This defaults to "final_accuracy" and should not be changed for HPVM binaries. """ # Read first line ("the float") and configs in config file header, configs = read_hpvm_configs(Path(config_path)) if not configs: raise ValueError("Config file with no configs is unsupported.") # Modifies configs in place. profile_configs( binary_path, configs[1:], configs[0], progress_bar, profile_filename, qos_filename, ) write_hpvm_configs(header, configs, Path(output_config_path)) def profile_configs( binary_path: PathLike, configs: Iterable["Config"], baseline_config: "Config", progress_bar: bool = True, profile_filename: str = "profile_info.txt", qos_filename: str = "final_accuracy", ) -> None: """Profile a sequence of HPVM configs. This function modifies argument `configs` in place.""" from tqdm import tqdm baseline_time, baseline_acc = measure_config(binary_path, baseline_config) iterable = tqdm(configs, desc="Configs profiled") if progress_bar else configs for config in iterable: time, acc = measure_config(binary_path, config, profile_filename, qos_filename) speedup = baseline_time / time config.update_profile_results(speedup, acc, baseline_acc) return configs def measure_config( binary_path: PathLike, config: "Config", profile_filename: str = "profile_info.txt", qos_filename: str = "final_accuracy", ): from subprocess import check_call from tempfile import NamedTemporaryFile import os temp_file = NamedTemporaryFile("w") write_hpvm_configs("0.0", [config], Path(temp_file.name)) # Run binary_path binary, # which generates `profile_filename` and `qos_filename` file in cwd. try: with open(os.devnull, "w") as f: check_call([str(binary_path), "-c", str(temp_file.name)], stdout=f) except CalledProcessError as e: print("Output from the program:") print(e.output) raise e time = _read_profile_file(Path(profile_filename)) acc = _read_qos_file(Path(qos_filename)) temp_file.close() return time, acc def plot_hpvm_configs( config_path: PathLike, save_to: PathLike = None, show_qos_loss: bool = True, **fig_kwargs, ) -> plt.Figure: """ Plot the QoS-speedup information in an HPVM configuration file. 
It is recommended to profile the config file first (using `profile_configs`) to obtain real speedup numbers. This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it. :param config_path: Path to the config file (HPVM configuration format). :param save_to: File to save figure into. Default is None: don't save figure (just return it). :param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True. If False, will use (absolute) QoS instead of QoS loss. :param fig_kwargs: Arguments to pass to `plt.subplots`. """ import numpy as np _, configs = read_hpvm_configs(config_path) get_qos = lambda c: c.qos_loss if show_qos_loss else c.qos qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs]) qoses, speedups = qos_speedup.T fig, ax = plt.subplots(**fig_kwargs) ax.scatter(qoses, speedups) ax.set_xlabel("QoS Loss") ax.set_ylabel("Speedup (X)") if save_to: fig.savefig(save_to, dpi=300) return fig class Config: def __init__( self, conf_name: str, speedup: float, energy: float, qos: float, qos_loss: float, config_body: List[str], ): self.conf_name = conf_name self.speedup = speedup self.energy = energy self.qos = qos self.qos_loss = qos_loss # We don't care about the information in this part, and we don't parse this. self.config_body = config_body def update_profile_results(self, speedup: float, qos: float, base_qos: float): recorded_base_qos = self.qos + self.qos_loss if abs(recorded_base_qos - base_qos) > 0.025: raise ValueError( f"Baseline QoS mismatch. Original: {recorded_base_qos}, measured: {base_qos}" ) self.speedup = speedup self.qos = qos self.qos_loss = base_qos - qos def __repr__(self) -> str: header_fields = [ self.conf_name, self.speedup, self.energy, self.qos, self.qos_loss, ] header = " ".join(str(field) for field in header_fields) lines = [conf_opening, header, *self.config_body, conf_closing] return "\n".join(lines) __str__ = __repr__ def read_hpvm_configs(config_file: PathLike) -> Tuple[str, List[Config]]: # def read_hpvm_configs(config_file, config_num, temp_file): ret_configs = [] with open(config_file) as f: text = f.read() # There's 1 float sitting on the first line of config file. # We don't use it, but want to keep that intact. header, *configs = text.split(conf_opening) header = header.strip() for config_text in configs: config_text = config_text.replace(conf_closing, "").strip() config_header, *config_body = config_text.splitlines() conf_name, *number_fields = config_header.split(" ") speedup, energy, qos, qos_drop = [float(s) for s in number_fields] ret_configs.append( Config(conf_name, speedup, energy, qos, qos_drop, config_body) ) return header, ret_configs def write_hpvm_configs(header: str, configs: Iterable[Config], to_file: PathLike): text_segs = [header] + [str(config) for config in configs] with open(to_file, "w") as f: f.write("\n".join(text_segs)) f.flush() def _read_profile_file(profile_file_path: Path): with profile_file_path.open() as f: target_lines = [line.strip() for line in f if "Total Time" in line] if len(target_lines) != 1: raise RuntimeError(f"Profile {profile_file_path} malformed") (target_line,) = target_lines return float(target_line.split()[3]) def _read_qos_file(qos_file_path: Path): with qos_file_path.open() as f: return float(f.read().strip())
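To show the on-disk format these helpers round-trip, here is a hedged example; the config name, numbers, and knob line are made up.

# Hypothetical single-config round trip; Config.__repr__ emits the +++++/----- framing.
conf = Config("conf1", speedup=1.5, energy=0.0, qos=97.3, qos_loss=0.7,
              config_body=["1 gpu conv fp16 1"])  # opaque body line, not parsed here
print(conf)
# +++++
# conf1 1.5 0.0 97.3 0.7
# 1 gpu conv fp16 1
# -----
write_hpvm_configs("2.0", [conf], "example_confs.txt")  # "2.0" is the leading header float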
make_test_file
Generate a test file containing nothing but zeroes. If the file size is negative, a random size between 1 and 10 KB will be chosen. If the file name is empty, a random one will be generated. Returns: name: (str) name of the test file generated. size: (int) size of the test file generated.
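The implementation of this helper is masked in the record below. Based only on the docstring and on how the tests call it (status.error(), status['name'], a keyword file_size, an optional positional file name), a plausible sketch follows; the RetVal calls and the "<timestamp>.<size>.<uuid>" name format are assumptions, not the project's actual code.

# Hedged sketch only: zero-filled test file generator matching the docstring above.
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
    if file_size < 0:
        file_size = random.randint(1, 10) * 1024          # 1-10 KB when size is negative
    if file_name == '':
        file_name = f"{int(time.time())}.{file_size}.{uuid.uuid4()}"  # assumed name format

    try:
        with open(os.path.join(path, file_name), 'w') as f:
            f.write('0' * file_size)                       # nothing but zeroes
    except Exception as e:
        return RetVal().set_error(str(e))                  # assumed retval error API

    out = RetVal()
    out['name'] = file_name                                # assumed item assignment on RetVal
    out['size'] = file_size
    return out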
# pylint: disable=too-many-lines import os import random import shutil import time import uuid from retval import RetVal from pycryptostring import CryptoString from pymensago.encryption import EncryptionPair from pymensago.hash import blake2hash from pymensago.serverconn import ServerConnection from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \ init_user2, reset_top_dir from tests.integration.integration_setup import funcname server_response = { 'title' : 'Mensago Server Response', 'type' : 'object', 'required' : [ 'Code', 'Status', 'Info', 'Data' ], 'properties' : { 'Code' : { 'type' : 'integer' }, 'Status' : { 'type' : 'string' }, 'Info' : { 'type' : 'string' }, 'Data' : { 'type' : 'object' } } } # MASKED: make_test_file function (lines 40-64) def setup_testdir(name) -> str: '''Creates a test folder for holding files''' topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles') if not os.path.exists(topdir): os.mkdir(topdir) testdir = os.path.join(topdir, name) while os.path.exists(testdir): try: shutil.rmtree(testdir) except: print("Waiting a second for test folder to unlock") time.sleep(1.0) os.mkdir(testdir) return testdir def test_copy(): '''Tests the COPY command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Set up the directory hierarchy admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111') os.mkdir(inner_dir) # Subtest #1: Nonexistent source file conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef', 'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file' # Subtest #2: Nonexistent destination directory # By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB # disk quota status = make_test_file(admin_dir, file_size=0x10_0001) assert not status.error(), 'test_copy: #2 failed to create a test file' testfile1 = status['name'] conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222" } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir' # Subtest #3: Source path is a directory conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as 
source' # Subtest #4: Destination is file path # Normally each file on the system has a unique name, but having a duplicate in this case # won't matter status = make_test_file(inner_dir, 102400, testfile1) conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination' # Subtest #5: Insufficient quota remaining # The administrator normally can't have a quota. We'll just fix that just for this one test # *heh* # We actually have to do an update instead of an insert because the quota checks in earlier # calls ensure that there is a quota record for admin in the database cur = dbconn.cursor() cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'") dbconn.commit() conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit' # We need this to be unlimited for later tests cur = dbconn.cursor() cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'") dbconn.commit() # Subtest #6: Actual success conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_copy: #6 failed to succeed' conn.disconnect() def test_delete(): '''Test the DELETE command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad path conn.send_message({ 'Action': 'DELETE', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name" } }) response = conn.read_response(server_response) assert response['Code'] == 400, f"{funcname()}: failed to handle bad path" # Subtest #2: Directory doesn't exist conn.send_message({ 'Action': 'DELETE', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file" # Subtest #3: Actual success admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), f"{funcname()}: #3 failed to create test file" filename = status["name"] conn.send_message({ 'Action': 'DELETE', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {filename}" } }) response = conn.read_response(server_response) assert response['Code'] == 200, f"{funcname()}: #3 failed 
to delete file" def test_download(): '''This tests the command DOWNLOAD''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) # Subtest #1: Missing parameters conn.send_message({'Action': 'DOWNLOAD','Data': {}}) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter' # Subtest #2: Non-existent path conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' + ' 1000.1000.22222222-2222-2222-2222-222222222222' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path' # Subtest #3: Actual success status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000) assert not status.error(), f"test_download: #3 failed to create test file: {status.info}" testname = status['name'] conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}" } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download' assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \ 'test_download: #3 server failed to respond with file size' conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Size': '1000' } }) rawdata = conn.read() assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length' # Set up an 'interrupted' transfer status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000) assert not status.error(), f"test_download: #4 failed to create test file: {status.info}" testname = status['name'] # Subtest #7: Resume offset larger than size of data stored server-side conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '2500' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size' # Subtest #5: Resume interrupted transfer - exact match conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500' } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download' assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \ 'test_download: #5 server failed to respond with file size' conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500', 'Size': '1000' } }) rawdata = conn.read() assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length' assert blake2hash((('0' 
* 500) + rawdata).encode()) == \ 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \ 'test_download: #8 resumed file hash failure' conn.disconnect() def test_getquotainfo(): '''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the disk usage''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000) assert not status.error(), f"Failed to create test workspace file: {status.info}" conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information' assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect' assert response['Data']['QuotaSize'] == '0', \ "test_getquotainfo: admin quota wasn't unlimited" conn.disconnect() def test_list(): '''Tests the LIST command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Nonexistent path conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_list: #1 failed to handle missing path' # Subtest #2: Path is a file admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), "test_list: #2 failed to create test file" conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']]) } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_list: #2 failed to handle path as file' # Subtest #3: Empty directory os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')) conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory' assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \ 'test_list: #3 failed to have empty response 
for empty directory' # Subtest #4: A list of files for i in range(1,6): tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())]) try: fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname), 'w') except Exception as e: assert False, 'test_list: #4 failed to create test files: ' + e fhandle.write('0' * 500) fhandle.close() conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory' assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \ 'test_list: #4 failed to list all files in directory' # Subtest #5: A list of files with time specifier conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111', 'Time': '3000' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory' assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \ 'test_list: #5 failed to filter files' conn.disconnect() def test_listdirs(): '''Tests the LISTDIRS command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Nonexistent path conn.send_message({ 'Action': 'LISTDIRS', 'Data': { 'Path': '/ 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path' # Subtest #2: Path is a file admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), "test_listdirs: #2 failed to create test file" conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']]) } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file' # Subtest #3: Empty directory os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')) conn.send_message({ 'Action': 'LISTDIRS', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory' assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \ 'test_listdirs: #3 failed to have empty response for empty directory' # Subtest #4: A list of directories for i in range(2,7): tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)]) try: os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname)) except Exception as e: assert False, 'test_listdirs: #4 failed to 
create test directories: ' + e make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')) conn.send_message({ 'Action': 'LISTDIRS', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory' assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \ 'test_list: #4 failed to list all subdirectories' conn.disconnect() def test_mkdir(): '''Tests the MKDIR command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad directory name conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path' # Subtest #2: Actual success - 1 directory conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory' # Subtest #3: Directory already exists conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory' # Subtest #4: Actual success - nested directories multipath = ' '.join(['/', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555' ]) conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': multipath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory' conn.disconnect() def test_move(): '''Tests the MOVE command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Set up the directory hierarchy admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) inner_dir = os.path.join(admin_dir, 
'11111111-1111-1111-1111-111111111111') os.mkdir(inner_dir) # Subtest #1: Nonexistent source file conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef', 'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file' # Subtest #2: Nonexistent destination directory status = make_test_file(admin_dir) assert not status.error(), 'test_move: #2 failed to create a test file' testfile1 = status['name'] conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222" } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir' # Subtest #3: Source path is a directory conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source' # Subtest #4: Destination is file path # Normally each file on the system has a unique name, but having a duplicate in this case # won't matter status = make_test_file(inner_dir, 102400, testfile1) conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination' os.remove(os.path.join(inner_dir, status['name'])) # Subtest #5: Actual success conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_copy: #6 failed to succeed' conn.disconnect() def test_replace(): '''Test the REPLACE command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad old file path conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'Size': "1234", 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT' } }) response = conn.read_response(server_response) assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path" admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = 
make_test_file(admin_dir) filename = status['name'] # Subtest #2: Bad new file path conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'Size': "1234", 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT' } }) response = conn.read_response(server_response) assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path" # Subtest #4: Destination directory doesn't exist conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111", 'Size': "4321", 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT' } }) response = conn.read_response(server_response) assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir" # Subtest #5: Actual success status = make_test_file(admin_dir) assert not status.error(), f"{funcname()}: #3 failed to create test file" filename = status["name"] conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']}", 'Size': "1000", 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp' } }) response = conn.read_response(server_response) assert response['Code'] == 100, f'{funcname()}: #6 failed to proceed to file upload' conn.write('0' * 1000) response = conn.read_response(server_response) assert response['Code'] == 200, f'{funcname()}: #6 failed to replace file' conn.disconnect() def test_rmdir(): '''Tests the RMDIR command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad directory name conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name', 'Recursive': 'False' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path' # Subtest #2: Directory doesn't exist conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111', 'Recursive': 'False' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory' # Subtest #3: Call fails because of non-empty directory multipath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555' ]) conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': multipath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy' conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 
22222222-2222-2222-2222-222222222222', 'Recursive': 'False' } }) response = conn.read_response(server_response) assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory' # Subtest #4: Actual success - non-recursively remove an empty directory conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': multipath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory' def test_select(): '''Tests the SELECT command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Nonexistent path conn.send_message({ 'Action': 'SELECT', 'Data': { 'Path': '/ 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_select: #1 failed to handle missing path' # Subtest #2: Path is a file admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), "test_select: #2 failed to create test file" conn.send_message({ 'Action': 'SELECT', 'Data': { 'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']]) } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_select: #2 failed to handle path as file' # Subtest #3: Actual success innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222']) conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': innerpath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_select: #3 failed to create test directory' conn.send_message({ 'Action': 'SELECT', 'Data': { 'Path': innerpath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_select: #3 failed to work correctly' conn.disconnect() def test_setquota(): '''Tests the SETQUOTA command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) init_user2(dbdata, conn) # Subtest #1: Bad sizes conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': '0', 'Workspaces': '33333333-3333-3333-3333-333333333333' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_setquota: failed to handle bad size value' conn.send_message({ 
'Action': 'SETQUOTA', 'Data': { 'Size': "Real programmers don't eat quiche ;)", 'Workspaces': '33333333-3333-3333-3333-333333333333' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type' # Subtest #2: Bad workspace list conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': "4096", 'Workspaces': '33333333-3333-3333-3333-333333333333,' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list' # Subtest #3: Actual success conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': "4096", 'Workspaces': '33333333-3333-3333-3333-333333333333, ' \ '44444444-4444-4444-4444-444444444444' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_setquota: failed to handle actual success' conn.disconnect() def test_upload(): '''Tests the UPLOAD command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) # Subtest #1: Missing parameters conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': '1000', # Hash parameter is missing 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter' # Subtest #2: Non-existent path conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': '1000', 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path' # Subtest #3: Size too big conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(0x4000_0000 * 200), # 200GiB isn't all that big :P 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big' # Subtest #4: Insufficient quota remaining # The administrator normally can't have a quota. We'll just fix that just for this one test # *heh* # Normally in Python direct string substitution is a recipe for SQL injection. We're not # bringing in any insecure code here, so it's only a little bit bad. 
cur = dbconn.cursor() cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)") dbconn.commit() conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(0x10_0000 * 30), # 30MiB 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 409, 'test_upload: #4 quota check failed' # We need this to be unlimited for later tests cur = dbconn.cursor() cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'") dbconn.commit() # Subtest #5: Hash mismatch conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload' conn.write('0' * 1000) response = conn.read_response(server_response) assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch' # Subtest #6: Actual success conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload' conn.write('0' * 1000) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_upload: #6 failed to handle file hash mismatch' # Set up an interrupted transfer conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) tempFileName = response['Data']['TempName'] assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload' assert tempFileName != '', 'test_upload: #6 server failed to return temp file name' conn.write('0' * 500) del conn conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" login_admin(dbdata, conn) # Subtest #7: Resume offset larger than size of data stored server-side conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'], 'TempName': tempFileName, 'Offset': '2000' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size' # Subtest #8: Resume interrupted transfer - exact match conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'], 'TempName': tempFileName, 'Offset': '500' } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload' conn.write('0' * 500) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match' # Set up one last interrupted transfer conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) tempFileName = response['Data']['TempName'] assert 
response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload' assert tempFileName != '', 'test_upload: #6 server failed to return temp file name' conn.write('0' * 500) del conn conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" login_admin(dbdata, conn) # Subtest #9: Overlapping resume conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'], 'TempName': tempFileName, 'Offset': '400' } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload' conn.write('0' * 600) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset' conn.disconnect() if __name__ == '__main__': # test_copy() # test_delete() # test_download() # test_getquotainfo() # test_list() # test_listdirs() # test_mkdir() # test_move() test_replace() # test_rmdir() # test_setquota() # test_select() # test_upload()
def make_test_file(path: str, file_size=-1, file_name='') -> RetVal:
	'''Generate a test file containing nothing but zeroes.

	If the file size is negative, a random size between 1 and 10 KiB will be chosen. If the
	file name is empty, a random one will be generated.

	Returns:
	  name: (str) name of the test file generated
	  size: (int) size of the test file generated
	'''
	if file_size < 0:
		file_size = random.randint(1, 10) * 1024

	if not file_name:
		file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}"

	try:
		fhandle = open(os.path.join(path, file_name), 'w')
	except Exception as e:
		return RetVal().wrap_exception(e)

	fhandle.write('0' * file_size)
	fhandle.close()

	return RetVal().set_values({'name': file_name, 'size': file_size})
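For reference, a minimal usage sketch of the helper above; the temporary scratch directory and the fixed file name are illustrative only and are not part of the suite's setup_testdir()/reset_top_dir() flow.

# Illustrative usage only; assumes make_test_file() defined above is in scope.
import os
import tempfile

scratch = tempfile.mkdtemp()

# Default call: random size between 1 KiB and 10 KiB, generated file name
status = make_test_file(scratch)
assert not status.error()
print(status['name'], status['size'])

# Explicit size and name (hypothetical values)
status = make_test_file(scratch, file_size=1000, file_name='1000.1000.example')
assert not status.error()
assert os.path.getsize(os.path.join(scratch, '1000.1000.example')) == 1000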
40
64
# pylint: disable=too-many-lines import os import random import shutil import time import uuid from retval import RetVal from pycryptostring import CryptoString from pymensago.encryption import EncryptionPair from pymensago.hash import blake2hash from pymensago.serverconn import ServerConnection from integration_setup import login_admin, regcode_admin, setup_test, init_server, init_user, \ init_user2, reset_top_dir from tests.integration.integration_setup import funcname server_response = { 'title' : 'Mensago Server Response', 'type' : 'object', 'required' : [ 'Code', 'Status', 'Info', 'Data' ], 'properties' : { 'Code' : { 'type' : 'integer' }, 'Status' : { 'type' : 'string' }, 'Info' : { 'type' : 'string' }, 'Data' : { 'type' : 'object' } } } def make_test_file(path: str, file_size=-1, file_name='') -> RetVal: '''Generate a test file containing nothing but zeroes. If the file size is negative, a random size between 1 and 10 Kb will be chosen. If the file name is empty, a random one will be generated. Returns: name: (str) name of the test file generated size: (int) size of the test file generated ''' if file_size < 0: file_size = random.randint(1,10) * 1024 if file_name == '' or not file_name: file_name = f"{int(time.time())}.{file_size}.{str(uuid.uuid4())}" try: fhandle = open(os.path.join(path, file_name), 'w') except Exception as e: return RetVal().wrap_exception(e) fhandle.write('0' * file_size) fhandle.close() return RetVal().set_values({ 'name':file_name, 'size':file_size }) def setup_testdir(name) -> str: '''Creates a test folder for holding files''' topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)),'testfiles') if not os.path.exists(topdir): os.mkdir(topdir) testdir = os.path.join(topdir, name) while os.path.exists(testdir): try: shutil.rmtree(testdir) except: print("Waiting a second for test folder to unlock") time.sleep(1.0) os.mkdir(testdir) return testdir def test_copy(): '''Tests the COPY command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Set up the directory hierarchy admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111') os.mkdir(inner_dir) # Subtest #1: Nonexistent source file conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': '/ wsp ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef', 'DestDir': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_copy: #1 failed to handle nonexistent source file' # Subtest #2: Nonexistent destination directory # By making this 1MB + 1byte, the file's mere existence will put us over the limit of the 1MB # disk quota status = make_test_file(admin_dir, file_size=0x10_0001) assert not status.error(), 'test_copy: #2 failed to create a test 
file' testfile1 = status['name'] conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222" } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_copy: #2 failed to handle nonexistent destination dir' # Subtest #3: Source path is a directory conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_copy: #3 failed to handle directory as source' # Subtest #4: Destination is file path # Normally each file on the system has a unique name, but having a duplicate in this case # won't matter status = make_test_file(inner_dir, 102400, testfile1) conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination' # Subtest #5: Insufficient quota remaining # The administrator normally can't have a quota. We'll just fix that just for this one test # *heh* # We actually have to do an update instead of an insert because the quota checks in earlier # calls ensure that there is a quota record for admin in the database cur = dbconn.cursor() cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'") dbconn.commit() conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 409, 'test_copy: #5 failed to handle quota limit' # We need this to be unlimited for later tests cur = dbconn.cursor() cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'") dbconn.commit() # Subtest #6: Actual success conn.send_message({ 'Action': 'COPY', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_copy: #6 failed to succeed' conn.disconnect() def test_delete(): '''Test the DELETE command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad path conn.send_message({ 'Action': 'DELETE', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name" } }) response = conn.read_response(server_response) assert response['Code'] == 400, f"{funcname()}: failed to handle bad path" # Subtest #2: Directory doesn't exist conn.send_message({ 'Action': 
'DELETE', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 404, f"{funcname()}: #2 failed to handle nonexistent file" # Subtest #3: Actual success admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), f"{funcname()}: #3 failed to create test file" filename = status["name"] conn.send_message({ 'Action': 'DELETE', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {filename}" } }) response = conn.read_response(server_response) assert response['Code'] == 200, f"{funcname()}: #3 failed to delete file" def test_download(): '''This tests the command DOWNLOAD''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) # Subtest #1: Missing parameters conn.send_message({'Action': 'DOWNLOAD','Data': {}}) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_download: #1 failed to handle missing parameter' # Subtest #2: Non-existent path conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' + ' 1000.1000.22222222-2222-2222-2222-222222222222' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_download: #2 failed to handle non-existent path' # Subtest #3: Actual success status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000) assert not status.error(), f"test_download: #3 failed to create test file: {status.info}" testname = status['name'] conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}" } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download' assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \ 'test_download: #3 server failed to respond with file size' conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Size': '1000' } }) rawdata = conn.read() assert len(rawdata) == 1000, 'test_download: #3 downloaded file had wrong length' # Set up an 'interrupted' transfer status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000) assert not status.error(), f"test_download: #4 failed to create test file: {status.info}" testname = status['name'] # Subtest #7: Resume offset larger than size of data stored server-side conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '2500' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_download: #4 failed to handle offset > file size' # 
Subtest #5: Resume interrupted transfer - exact match conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500' } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_download: #3 failed to proceed to file download' assert 'Size' in response['Data'] and response['Data']['Size'] == '1000', \ 'test_download: #5 server failed to respond with file size' conn.send_message({ 'Action': 'DOWNLOAD', 'Data': { 'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500', 'Size': '1000' } }) rawdata = conn.read() assert len(rawdata) == 500, 'test_download: #5 resumed data had wrong length' assert blake2hash((('0' * 500) + rawdata).encode()) == \ 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', \ 'test_download: #8 resumed file hash failure' conn.disconnect() def test_getquotainfo(): '''This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the disk usage''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000) assert not status.error(), f"Failed to create test workspace file: {status.info}" conn.send_message({ 'Action': 'GETQUOTAINFO', 'Data': {} }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_getquotainfo: failed to get quota information' assert response['Data']['DiskUsage'] == '1000', 'test_getquotainfo: disk usage was incorrect' assert response['Data']['QuotaSize'] == '0', \ "test_getquotainfo: admin quota wasn't unlimited" conn.disconnect() def test_list(): '''Tests the LIST command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Nonexistent path conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_list: #1 failed to handle missing path' # Subtest #2: Path is a file admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), "test_list: #2 failed to create test file" conn.send_message({ 
'Action': 'LIST', 'Data': { 'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']]) } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_list: #2 failed to handle path as file' # Subtest #3: Empty directory os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')) conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_list: #3 failed to handle empty directory' assert 'Files' in response['Data'] and len(response['Data']['Files']) == 0, \ 'test_list: #3 failed to have empty response for empty directory' # Subtest #4: A list of files for i in range(1,6): tempname = '.'.join([str(1000 * i), '500', str(uuid.uuid4())]) try: fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname), 'w') except Exception as e: assert False, 'test_list: #4 failed to create test files: ' + e fhandle.write('0' * 500) fhandle.close() conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_list: #4 failed to handle non-empty directory' assert 'Files' in response['Data'] and len(response['Data']['Files']) == 5, \ 'test_list: #4 failed to list all files in directory' # Subtest #5: A list of files with time specifier conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111', 'Time': '3000' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_list: #5 failed to handle non-empty directory' assert 'Files' in response['Data'] and len(response['Data']['Files']) == 3, \ 'test_list: #5 failed to filter files' conn.disconnect() def test_listdirs(): '''Tests the LISTDIRS command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Nonexistent path conn.send_message({ 'Action': 'LISTDIRS', 'Data': { 'Path': '/ 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_listdirs: #1 failed to handle missing path' # Subtest #2: Path is a file admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), "test_listdirs: #2 failed to create test file" conn.send_message({ 'Action': 'LIST', 'Data': { 'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']]) } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_listdirs: #2 failed to handle path as file' # Subtest #3: Empty directory os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')) conn.send_message({ 
'Action': 'LISTDIRS', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_listdirs: #3 failed to handle empty directory' assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 0, \ 'test_listdirs: #3 failed to have empty response for empty directory' # Subtest #4: A list of directories for i in range(2,7): tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)]) try: os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname)) except Exception as e: assert False, 'test_listdirs: #4 failed to create test directories: ' + e make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')) conn.send_message({ 'Action': 'LISTDIRS', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_listdirs: #4 failed to handle non-empty directory' assert 'Directories' in response['Data'] and len(response['Data']['Directories']) == 5, \ 'test_list: #4 failed to list all subdirectories' conn.disconnect() def test_mkdir(): '''Tests the MKDIR command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad directory name conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_mkdir: #1 failed to handle bad path' # Subtest #2: Actual success - 1 directory conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory' # Subtest #3: Directory already exists conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 408, 'test_mkdir: #3 failed to handle existing directory' # Subtest #4: Actual success - nested directories multipath = ' '.join(['/', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555' ]) conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': multipath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_mkdir: #2 failed to create legitimate directory' conn.disconnect() def test_move(): '''Tests the MOVE command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at 
localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Set up the directory hierarchy admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111') os.mkdir(inner_dir) # Subtest #1: Nonexistent source file conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': '/ ' + dbdata['admin_wid'] + ' 1.1.01234567-89ab-cdef-0123-456789abcdef', 'DestDir': '/ ' + dbdata['admin_wid'] + ' 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_move: #1 failed to handle nonexistent source file' # Subtest #2: Nonexistent destination directory status = make_test_file(admin_dir) assert not status.error(), 'test_move: #2 failed to create a test file' testfile1 = status['name'] conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222" } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_move: #2 failed to handle nonexistent destination dir' # Subtest #3: Source path is a directory conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_move: #3 failed to handle directory as source' # Subtest #4: Destination is file path # Normally each file on the system has a unique name, but having a duplicate in this case # won't matter status = make_test_file(inner_dir, 102400, testfile1) conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}" } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_copy: #4 failed to handle file as destination' os.remove(os.path.join(inner_dir, status['name'])) # Subtest #5: Actual success conn.send_message({ 'Action': 'MOVE', 'Data': { 'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111" } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_copy: #6 failed to succeed' conn.disconnect() def test_replace(): '''Test the REPLACE command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), 
CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad old file path conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'Size': "1234", 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT' } }) response = conn.read_response(server_response) assert response['Code'] == 400, f"{funcname()}: #1 failed to handle bad old file path" admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) filename = status['name'] # Subtest #2: Bad new file path conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'Size': "1234", 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT' } }) response = conn.read_response(server_response) assert response['Code'] == 400, f"{funcname()}: #2 failed to handle bad new file path" # Subtest #4: Destination directory doesn't exist conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'NewPath': "/ wsp 11111111-1111-1111-1111-111111111111", 'Size': "4321", 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT' } }) response = conn.read_response(server_response) assert response['Code'] == 404, f"{funcname()}: #4 failed to handle nonexistent destination dir" # Subtest #5: Actual success status = make_test_file(admin_dir) assert not status.error(), f"{funcname()}: #3 failed to create test file" filename = status["name"] conn.send_message({ 'Action': 'REPLACE', 'Data': { 'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']}", 'Size': "1000", 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp' } }) response = conn.read_response(server_response) assert response['Code'] == 100, f'{funcname()}: #6 failed to proceed to file upload' conn.write('0' * 1000) response = conn.read_response(server_response) assert response['Code'] == 200, f'{funcname()}: #6 failed to replace file' conn.disconnect() def test_rmdir(): '''Tests the RMDIR command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Bad directory name conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' some_dir_name', 'Recursive': 'False' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_rmdir: #1 failed to handle bad path' # Subtest #2: Directory doesn't exist conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 
11111111-1111-1111-1111-111111111111', 'Recursive': 'False' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_rmdir: #2 failed to handle nonexistent directory' # Subtest #3: Call fails because of non-empty directory multipath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555' ]) conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': multipath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_rmdir: #3 failed to create test hierarchy' conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222', 'Recursive': 'False' } }) response = conn.read_response(server_response) assert response['Code'] == 408, 'test_rmdir: #3 failed to handle non-empty directory' # Subtest #4: Actual success - non-recursively remove an empty directory conn.send_message({ 'Action': 'RMDIR', 'Data': { 'Path': multipath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_rmdir: #4 failed to remove an empty directory' def test_select(): '''Tests the SELECT command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) # Subtest #1: Nonexistent path conn.send_message({ 'Action': 'SELECT', 'Data': { 'Path': '/ 11111111-1111-1111-1111-111111111111' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_select: #1 failed to handle missing path' # Subtest #2: Path is a file admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']) status = make_test_file(admin_dir) assert not status.error(), "test_select: #2 failed to create test file" conn.send_message({ 'Action': 'SELECT', 'Data': { 'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']]) } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_select: #2 failed to handle path as file' # Subtest #3: Actual success innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222']) conn.send_message({ 'Action': 'MKDIR', 'Data': { 'Path': innerpath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_select: #3 failed to create test directory' conn.send_message({ 'Action': 'SELECT', 'Data': { 'Path': innerpath } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_select: #3 failed to work correctly' conn.disconnect() def test_setquota(): '''Tests the SETQUOTA command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" # password is 'SandstoneAgendaTricycle' pwhash = 
'$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) init_user2(dbdata, conn) # Subtest #1: Bad sizes conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': '0', 'Workspaces': '33333333-3333-3333-3333-333333333333' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_setquota: failed to handle bad size value' conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': "Real programmers don't eat quiche ;)", 'Workspaces': '33333333-3333-3333-3333-333333333333' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_setquota: failed to handle bad size data type' # Subtest #2: Bad workspace list conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': "4096", 'Workspaces': '33333333-3333-3333-3333-333333333333,' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_setquota: failed to handle bad workspace list' # Subtest #3: Actual success conn.send_message({ 'Action': 'SETQUOTA', 'Data': { 'Size': "4096", 'Workspaces': '33333333-3333-3333-3333-333333333333, ' \ '44444444-4444-4444-4444-444444444444' } }) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_setquota: failed to handle actual success' conn.disconnect() def test_upload(): '''Tests the UPLOAD command''' dbconn = setup_test() dbdata = init_server(dbconn) conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" reset_top_dir(dbdata) # password is 'SandstoneAgendaTricycle' pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCq' \ 'dcCYkJLok65qussSyhN5TTZP+OTgzEI' devid = '22222222-2222-2222-2222-222222222222' devpair = EncryptionPair(CryptoString(r'CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString(r'CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l')) dbdata['pwhash'] = pwhash dbdata['devid'] = devid dbdata['devpair'] = devpair regcode_admin(dbdata, conn) login_admin(dbdata, conn) init_user(dbdata, conn) # Subtest #1: Missing parameters conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': '1000', # Hash parameter is missing 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_upload: #1 failed to handle missing parameter' # Subtest #2: Non-existent path conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': '1000', 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] + ' 22222222-2222-2222-2222-222222222222' } }) response = conn.read_response(server_response) assert response['Code'] == 404, 'test_upload: #2 failed to handle non-existent path' # Subtest #3: Size too big conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(0x4000_0000 * 200), # 200GiB isn't all that big :P 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 414, 'test_upload: #3 failed to handle file too big' # Subtest #4: Insufficient quota 
remaining # The administrator normally can't have a quota. We'll just fix that just for this one test # *heh* # Normally in Python direct string substitution is a recipe for SQL injection. We're not # bringing in any insecure code here, so it's only a little bit bad. cur = dbconn.cursor() cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)") dbconn.commit() conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(0x10_0000 * 30), # 30MiB 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 409, 'test_upload: #4 quota check failed' # We need this to be unlimited for later tests cur = dbconn.cursor() cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'") dbconn.commit() # Subtest #5: Hash mismatch conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #5 failed to proceed to file upload' conn.write('0' * 1000) response = conn.read_response(server_response) assert response['Code'] == 410, 'test_upload: #5 failed to handle file hash mismatch' # Subtest #6: Actual success conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload' conn.write('0' * 1000) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_upload: #6 failed to handle file hash mismatch' # Set up an interrupted transfer conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) tempFileName = response['Data']['TempName'] assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload' assert tempFileName != '', 'test_upload: #6 server failed to return temp file name' conn.write('0' * 500) del conn conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" login_admin(dbdata, conn) # Subtest #7: Resume offset larger than size of data stored server-side conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'], 'TempName': tempFileName, 'Offset': '2000' } }) response = conn.read_response(server_response) assert response['Code'] == 400, 'test_upload: #7 failed to handle offset > file size' # Subtest #8: Resume interrupted transfer - exact match conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'], 'TempName': tempFileName, 'Offset': '500' } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #8 failed to proceed to file upload' conn.write('0' * 500) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_upload: #8 failed to resume with exact offset match' # Set up one last interrupted transfer 
conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'] } }) response = conn.read_response(server_response) tempFileName = response['Data']['TempName'] assert response['Code'] == 100, 'test_upload: #6 failed to proceed to file upload' assert tempFileName != '', 'test_upload: #6 server failed to return temp file name' conn.write('0' * 500) del conn conn = ServerConnection() assert conn.connect('localhost', 2001), "Connection to server at localhost:2001 failed" login_admin(dbdata, conn) # Subtest #9: Overlapping resume conn.send_message({ 'Action': 'UPLOAD', 'Data': { 'Size': str(1000), 'Hash': r'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': '/ wsp ' + dbdata['admin_wid'], 'TempName': tempFileName, 'Offset': '400' } }) response = conn.read_response(server_response) assert response['Code'] == 100, 'test_upload: #9 failed to proceed to file upload' conn.write('0' * 600) response = conn.read_response(server_response) assert response['Code'] == 200, 'test_upload: #9 failed to resume with overlapping offset' conn.disconnect() if __name__ == '__main__': # test_copy() # test_delete() # test_download() # test_getquotainfo() # test_list() # test_listdirs() # test_mkdir() # test_move() test_replace() # test_rmdir() # test_setquota() # test_select() # test_upload()
id_to_svd
Convert ID to SVD.

The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
coefficients `idx` and `proj`, respectively, is::

    U, S, V = id_to_svd(B, idx, proj)
    A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))

See also :func:`svd`.

..  This function automatically detects the matrix data type and calls the
    appropriate backend. For details, see :func:`_backend.idd_id2svd` and
    :func:`_backend.idz_id2svd`.

Parameters
----------
B : :class:`numpy.ndarray`
    Skeleton matrix.
idx : :class:`numpy.ndarray`
    Column index array.
proj : :class:`numpy.ndarray`
    Interpolation coefficients.

Returns
-------
U : :class:`numpy.ndarray`
    Left singular vectors.
S : :class:`numpy.ndarray`
    Singular values.
V : :class:`numpy.ndarray`
    Right singular vectors.
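As a supplementary sketch (not part of the original docstring), the conversion can be exercised end to end with the fixed-precision ID routines from this module; the matrix size and tolerance below are illustrative, not prescriptive:

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> from scipy.linalg import hilbert
>>> A = hilbert(200)
>>> k, idx, proj = sli.interp_decomp(A, 1e-8)
>>> B = sli.reconstruct_skel_matrix(A, k, idx)
>>> U, S, V = sli.id_to_svd(B, idx, proj)
>>> err = np.linalg.norm(A - np.dot(U, np.dot(np.diag(S), V.conj().T)), 2)

Here ``err`` should be on the order of the requested tolerance.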
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** # Python module for interfacing with `id_dist`. r""" ====================================================================== Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`) ====================================================================== .. moduleauthor:: Kenneth L. Ho <[email protected]> .. versionadded:: 0.13 .. currentmodule:: scipy.linalg.interpolative An interpolative decomposition (ID) of a matrix :math:`A \in \mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a factorization .. math:: A \Pi = \begin{bmatrix} A \Pi_{1} & A \Pi_{2} \end{bmatrix} = A \Pi_{1} \begin{bmatrix} I & T \end{bmatrix}, where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with :math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} = A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`, where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}` are the *skeleton* and *interpolation matrices*, respectively. If :math:`A` does not have exact rank :math:`k`, then there exists an approximation in the form of an ID such that :math:`A = BP + E`, where :math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k + 1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k + 1}` is the best possible error for a rank-:math:`k` approximation and, in fact, is achieved by the singular value decomposition (SVD) :math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k \times k}` is diagonal with nonnegative entries. The principal advantages of using an ID over an SVD are that: - it is cheaper to construct; - it preserves the structure of :math:`A`; and - it is more efficient to compute with in light of the identity submatrix of :math:`P`. 
Routines ======== Main functionality: .. autosummary:: :toctree: generated/ interp_decomp reconstruct_matrix_from_id reconstruct_interp_matrix reconstruct_skel_matrix id_to_svd svd estimate_spectral_norm estimate_spectral_norm_diff estimate_rank Support functions: .. autosummary:: :toctree: generated/ seed rand References ========== This module uses the ID software package [1]_ by Martinsson, Rokhlin, Shkolnisky, and Tygert, which is a Fortran library for computing IDs using various algorithms, including the rank-revealing QR approach of [2]_ and the more recent randomized methods described in [3]_, [4]_, and [5]_. This module exposes its functionality in a way convenient for Python users. Note that this module does not add any functionality beyond that of organizing a simpler and more consistent interface. We advise the user to consult also the `documentation for the ID package <http://tygert.com/id_doc.4.pdf>`_. .. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a software package for low-rank approximation of matrices via interpolative decompositions, version 0.2." http://tygert.com/id_doc.4.pdf. .. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404, 2005. :doi:`10.1137/030602678`. .. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M. Tygert. "Randomized algorithms for the low-rank approximation of matrices." *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007. :doi:`10.1073/pnas.0709640104`. .. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30 (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`. .. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast randomized algorithm for the approximation of matrices." *Appl. Comput. Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`. Tutorial ======== Initializing ------------ The first step is to import :mod:`scipy.linalg.interpolative` by issuing the command: >>> import scipy.linalg.interpolative as sli Now let's build a matrix. For this, we consider a Hilbert matrix, which is well known to have low rank: >>> from scipy.linalg import hilbert >>> n = 1000 >>> A = hilbert(n) We can also do this explicitly via: >>> import numpy as np >>> n = 1000 >>> A = np.empty((n, n), order='F') >>> for j in range(n): >>> for i in range(n): >>> A[i,j] = 1. / (i + j + 1) Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This instantiates the matrix in Fortran-contiguous order and is important for avoiding data copying when passing to the backend. We then define multiplication routines for the matrix by regarding it as a :class:`scipy.sparse.linalg.LinearOperator`: >>> from scipy.sparse.linalg import aslinearoperator >>> L = aslinearoperator(A) This automatically sets up methods describing the action of the matrix and its adjoint on a vector. Computing an ID --------------- We have several choices of algorithm to compute an ID. These fall largely according to two dichotomies: 1. how the matrix is represented, i.e., via its entries or via its action on a vector; and 2. whether to approximate it to a fixed relative precision or to a fixed rank. We step through each choice in turn below. In all cases, the ID is represented by three parameters: 1. a rank ``k``; 2. an index array ``idx``; and 3. interpolation coefficients ``proj``. The ID is specified by the relation ``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``. 
From matrix entries ................... We first consider a matrix given in terms of its entries. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(A, eps) where ``eps < 1`` is the desired precision. To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(A, k) where ``k >= 1`` is the desired rank. Both algorithms use random sampling and are usually faster than the corresponding older, deterministic algorithms, which can be accessed via the commands: >>> k, idx, proj = sli.interp_decomp(A, eps, rand=False) and: >>> idx, proj = sli.interp_decomp(A, k, rand=False) respectively. From matrix action .................. Now consider a matrix given in terms of its action on a vector as a :class:`scipy.sparse.linalg.LinearOperator`. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(L, eps) To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(L, k) These algorithms are randomized. Reconstructing an ID -------------------- The ID routines above do not output the skeleton and interpolation matrices explicitly but instead return the relevant information in a more compact (and sometimes more useful) form. To build these matrices, write: >>> B = sli.reconstruct_skel_matrix(A, k, idx) for the skeleton matrix and: >>> P = sli.reconstruct_interp_matrix(idx, proj) for the interpolation matrix. The ID approximation can then be computed as: >>> C = np.dot(B, P) This can also be constructed directly using: >>> C = sli.reconstruct_matrix_from_id(B, idx, proj) without having to first compute ``P``. Alternatively, this can be done explicitly as well using: >>> B = A[:,idx[:k]] >>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)] >>> C = np.dot(B, P) Computing an SVD ---------------- An ID can be converted to an SVD via the command: >>> U, S, V = sli.id_to_svd(B, idx, proj) The SVD approximation is then: >>> C = np.dot(U, np.dot(np.diag(S), V.conj().T)) The SVD can also be computed "fresh" by combining both the ID and conversion steps into one command. Following the various ID algorithms above, there are correspondingly various SVD algorithms that one can employ. From matrix entries ................... We consider first SVD algorithms for a matrix given in terms of its entries. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(A, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(A, k) Both algorithms use random sampling; for the deterministic versions, issue the keyword ``rand=False`` as above. From matrix action .................. Now consider a matrix given in terms of its action on a vector. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(L, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(L, k) Utility routines ---------------- Several utility routines are also available. To estimate the spectral norm of a matrix, use: >>> snorm = sli.estimate_spectral_norm(A) This algorithm is based on the randomized power method and thus requires only matrix-vector products. The number of iterations to take can be set using the keyword ``its`` (default: ``its=20``). The matrix is interpreted as a :class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it as a :class:`numpy.ndarray`, in which case it is trivially converted using :func:`scipy.sparse.linalg.aslinearoperator`.
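As a rough sanity check of the spectral-norm estimator (a minimal sketch; the random test matrix below is an illustrative assumption, and the estimate itself is randomized):

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> A_test = np.random.randn(300, 200)          # arbitrary float64 matrix
>>> s_est = sli.estimate_spectral_norm(A_test, its=20)
>>> s_ref = np.linalg.norm(A_test, 2)           # exact largest singular value

The two values should typically agree to a few significant digits; increasing ``its`` tightens the estimate at the cost of more matrix-vector products.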
The same algorithm can also estimate the spectral norm of the difference of two matrices ``A1`` and ``A2`` as follows: >>> diff = sli.estimate_spectral_norm_diff(A1, A2) This is often useful for checking the accuracy of a matrix approximation. Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank of a matrix as well. This can be done with either: >>> k = sli.estimate_rank(A, eps) or: >>> k = sli.estimate_rank(L, eps) depending on the representation. The parameter ``eps`` controls the definition of the numerical rank. Finally, the random number generation required for all randomized routines can be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed values to their original values, use: >>> sli.seed('default') To specify the seed values, use: >>> sli.seed(s) where ``s`` must be an integer or array of 55 floats. If an integer, the array of floats is obtained by using ``numpy.random.rand`` with the given integer seed. To simply generate some random numbers, type: >>> sli.rand(n) where ``n`` is the number of random numbers to generate. Remarks ------- The above functions all automatically detect the appropriate interface and work with both real and complex data types, passing input arguments to the proper backend routine. """ import scipy.linalg._interpolative_backend as _backend import numpy as np import sys __all__ = [ 'estimate_rank', 'estimate_spectral_norm', 'estimate_spectral_norm_diff', 'id_to_svd', 'interp_decomp', 'rand', 'reconstruct_interp_matrix', 'reconstruct_matrix_from_id', 'reconstruct_skel_matrix', 'seed', 'svd', ] _DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)") _TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)") _32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems " "with complex128 is buggy") _IS_32BIT = (sys.maxsize < 2**32) def _is_real(A): try: if A.dtype == np.complex128: return False elif A.dtype == np.float64: return True else: raise _DTYPE_ERROR except AttributeError as e: raise _TYPE_ERROR from e def seed(seed=None): """ Seed the internal random number generator used in this ID package. The generator is a lagged Fibonacci method with 55-element internal state. Parameters ---------- seed : int, sequence, 'default', optional If 'default', the random seed is reset to a default value. If `seed` is a sequence containing 55 floating-point numbers in range [0,1], these are used to set the internal state of the generator. If the value is an integer, the internal state is obtained from `numpy.random.RandomState` (MT19937) with the integer used as the initial seed. If `seed` is omitted (None), ``numpy.random.rand`` is used to initialize the generator. """ # For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`, # and :func:`_backend.id_srando`. if isinstance(seed, str) and seed == 'default': _backend.id_srando() elif hasattr(seed, '__len__'): state = np.asfortranarray(seed, dtype=float) if state.shape != (55,): raise ValueError("invalid input size") elif state.min() < 0 or state.max() > 1: raise ValueError("values not in range [0,1]") _backend.id_srandi(state) elif seed is None: _backend.id_srandi(np.random.rand(55)) else: rnd = np.random.RandomState(seed) _backend.id_srandi(rnd.rand(55)) def rand(*shape): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. This routine is used for all random number generation in this package and can affect ID and SVD results. 
Parameters ---------- *shape Shape of output array """ # For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`. return _backend.id_srand(np.prod(shape)).reshape(shape) def interp_decomp(A, eps_or_k, rand=True): """ Compute ID of a matrix. An ID of a matrix `A` is a factorization defined by a rank `k`, a column index array `idx`, and interpolation coefficients `proj` such that:: numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]] The original matrix can then be reconstructed as:: numpy.hstack([A[:,idx[:k]], numpy.dot(A[:,idx[:k]], proj)] )[:,numpy.argsort(idx)] or via the routine :func:`reconstruct_matrix_from_id`. This can equivalently be written as:: numpy.dot(A[:,idx[:k]], numpy.hstack([numpy.eye(k), proj]) )[:,np.argsort(idx)] in terms of the skeleton and interpolation matrices:: B = A[:,idx[:k]] and:: P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)] respectively. See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. The ID can be computed to any relative precision or rank (depending on the value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then this function has the output signature:: k, idx, proj = interp_decomp(A, eps_or_k) Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output signature is:: idx, proj = interp_decomp(A, eps_or_k) .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`, :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`, :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`, :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`, :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`, :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec` Matrix to be factored eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- k : int Rank required to achieve specified relative precision if `eps_or_k < 1`. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. 
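Examples
--------
A minimal usage sketch (the Hilbert test matrix and the chosen rank ``20`` are arbitrary illustrative values; the randomized code path is used by default):

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> from scipy.linalg import hilbert
>>> A = hilbert(100)
>>> idx, proj = sli.interp_decomp(A, 20)                 # fixed rank k = 20
>>> C = sli.reconstruct_matrix_from_id(A[:, idx[:20]], idx, proj)

Here ``C`` should agree with ``A`` up to an error governed by the neglected singular values.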
""" from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: k, idx, proj = _backend.iddp_aid(eps, A) else: if _IS_32BIT: raise _32BIT_ERROR k, idx, proj = _backend.idzp_aid(eps, A) else: if real: k, idx, proj = _backend.iddp_id(eps, A) else: k, idx, proj = _backend.idzp_id(eps, A) return k, idx - 1, proj else: k = int(eps_or_k) if rand: if real: idx, proj = _backend.iddr_aid(A, k) else: if _IS_32BIT: raise _32BIT_ERROR idx, proj = _backend.idzr_aid(A, k) else: if real: idx, proj = _backend.iddr_id(A, k) else: idx, proj = _backend.idzr_id(A, k) return idx - 1, proj elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if eps_or_k < 1: eps = eps_or_k if real: k, idx, proj = _backend.iddp_rid(eps, m, n, matveca) else: if _IS_32BIT: raise _32BIT_ERROR k, idx, proj = _backend.idzp_rid(eps, m, n, matveca) return k, idx - 1, proj else: k = int(eps_or_k) if real: idx, proj = _backend.iddr_rid(m, n, matveca, k) else: if _IS_32BIT: raise _32BIT_ERROR idx, proj = _backend.idzr_rid(m, n, matveca, k) return idx - 1, proj else: raise _TYPE_ERROR def reconstruct_matrix_from_id(B, idx, proj): """ Reconstruct matrix from its ID. A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, can be reconstructed as:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_reconid` and :func:`_backend.idz_reconid`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Reconstructed matrix. """ if _is_real(B): return _backend.idd_reconid(B, idx + 1, proj) else: return _backend.idz_reconid(B, idx + 1, proj) def reconstruct_interp_matrix(idx, proj): """ Reconstruct interpolation matrix from ID. The interpolation matrix can be reconstructed from the ID indices and coefficients `idx` and `proj`, respectively, as:: P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] The original matrix can then be reconstructed from its skeleton matrix `B` via:: numpy.dot(B, P) See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_reconint` and :func:`_backend.idz_reconint`. Parameters ---------- idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Interpolation matrix. """ if _is_real(proj): return _backend.idd_reconint(idx + 1, proj) else: return _backend.idz_reconint(idx + 1, proj) def reconstruct_skel_matrix(A, k, idx): """ Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix `A` and its ID rank and indices `k` and `idx`, respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_interp_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. 
For details, see :func:`_backend.idd_copycols` and :func:`_backend.idz_copycols`. Parameters ---------- A : :class:`numpy.ndarray` Original matrix. k : int Rank of ID. idx : :class:`numpy.ndarray` Column index array. Returns ------- :class:`numpy.ndarray` Skeleton matrix. """ if _is_real(A): return _backend.idd_copycols(A, k, idx + 1) else: return _backend.idz_copycols(A, k, idx + 1) # MASKED: id_to_svd function (lines 732-770) def estimate_spectral_norm(A, its=20): """ Estimate spectral norm of a matrix by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_snorm` and :func:`_backend.idz_snorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) m, n = A.shape matvec = lambda x: A. matvec(x) matveca = lambda x: A.rmatvec(x) if _is_real(A): return _backend.idd_snorm(m, n, matveca, matvec, its=its) else: return _backend.idz_snorm(m, n, matveca, matvec, its=its) def estimate_spectral_norm_diff(A, B, its=20): """ Estimate spectral norm of the difference of two matrices by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and :func:`_backend.idz_diffsnorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). B : :class:`scipy.sparse.linalg.LinearOperator` Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate of matrix difference. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) B = aslinearoperator(B) m, n = A.shape matvec1 = lambda x: A. matvec(x) matveca1 = lambda x: A.rmatvec(x) matvec2 = lambda x: B. matvec(x) matveca2 = lambda x: B.rmatvec(x) if _is_real(A): return _backend.idd_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) else: return _backend.idz_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) def svd(A, eps_or_k, rand=True): """ Compute SVD of a matrix via an ID. An SVD of a matrix `A` is a factorization:: A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) where `U` and `V` have orthonormal columns and `S` is nonnegative. The SVD can be computed to any relative precision or rank (depending on the value of `eps_or_k`). See also :func:`interp_decomp` and :func:`id_to_svd`. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`, :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`, :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`, :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`, :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`, :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`. 
Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix to be factored, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: U, V, S = _backend.iddp_asvd(eps, A) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzp_asvd(eps, A) else: if real: U, V, S = _backend.iddp_svd(eps, A) else: U, V, S = _backend.idzp_svd(eps, A) else: k = int(eps_or_k) if k > min(A.shape): raise ValueError("Approximation rank %s exceeds min(A.shape) = " " %s " % (k, min(A.shape))) if rand: if real: U, V, S = _backend.iddr_asvd(A, k) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzr_asvd(A, k) else: if real: U, V, S = _backend.iddr_svd(A, k) else: U, V, S = _backend.idzr_svd(A, k) elif isinstance(A, LinearOperator): m, n = A.shape matvec = lambda x: A.matvec(x) matveca = lambda x: A.rmatvec(x) if eps_or_k < 1: eps = eps_or_k if real: U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec) else: k = int(eps_or_k) if real: U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k) else: raise _TYPE_ERROR return U, S, V def estimate_rank(A, eps): """ Estimate matrix rank to a specified relative precision using randomized methods. The matrix `A` can be given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used for each case. If `A` is of type :class:`numpy.ndarray`, then the output rank is typically about 8 higher than the actual numerical rank. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`, :func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix whose rank is to be estimated, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `rmatvec` method (to apply the matrix adjoint). eps : float Relative error for numerical rank definition. Returns ------- int Estimated matrix rank. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if real: rank = _backend.idd_estrank(eps, A) else: rank = _backend.idz_estrank(eps, A) if rank == 0: # special return value for nearly full rank rank = min(A.shape) return rank elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if real: return _backend.idd_findrank(eps, m, n, matveca) else: return _backend.idz_findrank(eps, m, n, matveca) else: raise _TYPE_ERROR
def id_to_svd(B, idx, proj): """ Convert ID to SVD. The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, is:: U, S, V = id_to_svd(B, idx, proj) A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) See also :func:`svd`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_id2svd` and :func:`_backend.idz_id2svd`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ if _is_real(B): U, V, S = _backend.idd_id2svd(B, idx + 1, proj) else: U, V, S = _backend.idz_id2svd(B, idx + 1, proj) return U, S, V
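A minimal usage sketch for the implementation above (the test matrix and the requested precision are illustrative assumptions); the factors returned by ``interp_decomp`` feed directly into ``id_to_svd``:

# Hypothetical example, not part of the library source.
import numpy as np
import scipy.linalg.interpolative as sli
from scipy.linalg import hilbert

A = hilbert(80)
k, idx, proj = sli.interp_decomp(A, 1e-8)      # ID to fixed precision
B = sli.reconstruct_skel_matrix(A, k, idx)     # skeleton columns of A
U, S, V = sli.id_to_svd(B, idx, proj)          # convert the ID to an SVD
approx = np.dot(U * S, V.conj().T)             # U @ diag(S) @ V^H

``approx`` should match ``A`` to roughly the requested precision, and ``U`` and ``V`` should have orthonormal columns.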
732
770
#****************************************************************************** # Copyright (C) 2013 Kenneth L. Ho # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. Redistributions in binary # form must reproduce the above copyright notice, this list of conditions and # the following disclaimer in the documentation and/or other materials # provided with the distribution. # # None of the names of the copyright holders may be used to endorse or # promote products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #****************************************************************************** # Python module for interfacing with `id_dist`. r""" ====================================================================== Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`) ====================================================================== .. moduleauthor:: Kenneth L. Ho <[email protected]> .. versionadded:: 0.13 .. currentmodule:: scipy.linalg.interpolative An interpolative decomposition (ID) of a matrix :math:`A \in \mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a factorization .. math:: A \Pi = \begin{bmatrix} A \Pi_{1} & A \Pi_{2} \end{bmatrix} = A \Pi_{1} \begin{bmatrix} I & T \end{bmatrix}, where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with :math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} = A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`, where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}` are the *skeleton* and *interpolation matrices*, respectively. If :math:`A` does not have exact rank :math:`k`, then there exists an approximation in the form of an ID such that :math:`A = BP + E`, where :math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k + 1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k + 1}` is the best possible error for a rank-:math:`k` approximation and, in fact, is achieved by the singular value decomposition (SVD) :math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k \times k}` is diagonal with nonnegative entries. The principal advantages of using an ID over an SVD are that: - it is cheaper to construct; - it preserves the structure of :math:`A`; and - it is more efficient to compute with in light of the identity submatrix of :math:`P`. 
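The identity submatrix mentioned in the last point can be seen directly in a small computation (a minimal sketch; the matrix and rank below are arbitrary choices): the columns of the interpolation matrix indexed by the skeleton selection reduce to the identity.

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> from scipy.linalg import hilbert
>>> A = hilbert(40)
>>> k = 8
>>> idx, proj = sli.interp_decomp(A, k)
>>> P = sli.reconstruct_interp_matrix(idx, proj)
>>> I_block = P[:, idx[:k]]        # expected to equal the k x k identity

``np.allclose(I_block, np.eye(k))`` should hold, which is what makes applying ``P`` inexpensive.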
Routines ======== Main functionality: .. autosummary:: :toctree: generated/ interp_decomp reconstruct_matrix_from_id reconstruct_interp_matrix reconstruct_skel_matrix id_to_svd svd estimate_spectral_norm estimate_spectral_norm_diff estimate_rank Support functions: .. autosummary:: :toctree: generated/ seed rand References ========== This module uses the ID software package [1]_ by Martinsson, Rokhlin, Shkolnisky, and Tygert, which is a Fortran library for computing IDs using various algorithms, including the rank-revealing QR approach of [2]_ and the more recent randomized methods described in [3]_, [4]_, and [5]_. This module exposes its functionality in a way convenient for Python users. Note that this module does not add any functionality beyond that of organizing a simpler and more consistent interface. We advise the user to consult also the `documentation for the ID package <http://tygert.com/id_doc.4.pdf>`_. .. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a software package for low-rank approximation of matrices via interpolative decompositions, version 0.2." http://tygert.com/id_doc.4.pdf. .. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404, 2005. :doi:`10.1137/030602678`. .. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M. Tygert. "Randomized algorithms for the low-rank approximation of matrices." *Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007. :doi:`10.1073/pnas.0709640104`. .. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30 (1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`. .. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast randomized algorithm for the approximation of matrices." *Appl. Comput. Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`. Tutorial ======== Initializing ------------ The first step is to import :mod:`scipy.linalg.interpolative` by issuing the command: >>> import scipy.linalg.interpolative as sli Now let's build a matrix. For this, we consider a Hilbert matrix, which is well known to have low rank: >>> from scipy.linalg import hilbert >>> n = 1000 >>> A = hilbert(n) We can also do this explicitly via: >>> import numpy as np >>> n = 1000 >>> A = np.empty((n, n), order='F') >>> for j in range(n): >>> for i in range(n): >>> A[i,j] = 1. / (i + j + 1) Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This instantiates the matrix in Fortran-contiguous order and is important for avoiding data copying when passing to the backend. We then define multiplication routines for the matrix by regarding it as a :class:`scipy.sparse.linalg.LinearOperator`: >>> from scipy.sparse.linalg import aslinearoperator >>> L = aslinearoperator(A) This automatically sets up methods describing the action of the matrix and its adjoint on a vector. Computing an ID --------------- We have several choices of algorithm to compute an ID. These fall largely according to two dichotomies: 1. how the matrix is represented, i.e., via its entries or via its action on a vector; and 2. whether to approximate it to a fixed relative precision or to a fixed rank. We step through each choice in turn below. In all cases, the ID is represented by three parameters: 1. a rank ``k``; 2. an index array ``idx``; and 3. interpolation coefficients ``proj``. The ID is specified by the relation ``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
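As a concrete check of this defining relation (a minimal sketch; the exactly rank-6 test matrix is an arbitrary assumption, and in floating point the equality becomes a small residual):

>>> import numpy as np
>>> import scipy.linalg.interpolative as sli
>>> rng = np.random.RandomState(1)
>>> A = np.asfortranarray(np.dot(rng.randn(60, 6), rng.randn(6, 60)))  # exact rank 6
>>> idx, proj = sli.interp_decomp(A, 6)
>>> resid = np.linalg.norm(np.dot(A[:, idx[:6]], proj) - A[:, idx[6:]])

``resid`` should be at roundoff level relative to ``np.linalg.norm(A)``.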
From matrix entries ................... We first consider a matrix given in terms of its entries. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(A, eps) where ``eps < 1`` is the desired precision. To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(A, k) where ``k >= 1`` is the desired rank. Both algorithms use random sampling and are usually faster than the corresponding older, deterministic algorithms, which can be accessed via the commands: >>> k, idx, proj = sli.interp_decomp(A, eps, rand=False) and: >>> idx, proj = sli.interp_decomp(A, k, rand=False) respectively. From matrix action .................. Now consider a matrix given in terms of its action on a vector as a :class:`scipy.sparse.linalg.LinearOperator`. To compute an ID to a fixed precision, type: >>> k, idx, proj = sli.interp_decomp(L, eps) To compute an ID to a fixed rank, use: >>> idx, proj = sli.interp_decomp(L, k) These algorithms are randomized. Reconstructing an ID -------------------- The ID routines above do not output the skeleton and interpolation matrices explicitly but instead return the relevant information in a more compact (and sometimes more useful) form. To build these matrices, write: >>> B = sli.reconstruct_skel_matrix(A, k, idx) for the skeleton matrix and: >>> P = sli.reconstruct_interp_matrix(idx, proj) for the interpolation matrix. The ID approximation can then be computed as: >>> C = np.dot(B, P) This can also be constructed directly using: >>> C = sli.reconstruct_matrix_from_id(B, idx, proj) without having to first compute ``P``. Alternatively, this can be done explicitly as well using: >>> B = A[:,idx[:k]] >>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)] >>> C = np.dot(B, P) Computing an SVD ---------------- An ID can be converted to an SVD via the command: >>> U, S, V = sli.id_to_svd(B, idx, proj) The SVD approximation is then: >>> C = np.dot(U, np.dot(np.diag(S), V.conj().T)) The SVD can also be computed "fresh" by combining both the ID and conversion steps into one command. Following the various ID algorithms above, there are correspondingly various SVD algorithms that one can employ. From matrix entries ................... We consider first SVD algorithms for a matrix given in terms of its entries. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(A, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(A, k) Both algorithms use random sampling; for the deterministic versions, issue the keyword ``rand=False`` as above. From matrix action .................. Now consider a matrix given in terms of its action on a vector. To compute an SVD to a fixed precision, type: >>> U, S, V = sli.svd(L, eps) To compute an SVD to a fixed rank, use: >>> U, S, V = sli.svd(L, k) Utility routines ---------------- Several utility routines are also available. To estimate the spectral norm of a matrix, use: >>> snorm = sli.estimate_spectral_norm(A) This algorithm is based on the randomized power method and thus requires only matrix-vector products. The number of iterations to take can be set using the keyword ``its`` (default: ``its=20``). The matrix is interpreted as a :class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it as a :class:`numpy.ndarray`, in which case it is trivially converted using :func:`scipy.sparse.linalg.aslinearoperator`.
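Because only matrix-vector products are required, the same call also works for a purely matrix-free operator (a minimal sketch; the spiked diagonal operator below is an arbitrary stand-in for an expensive implicit matrix):

>>> import numpy as np
>>> import scipy.sparse as sp
>>> from scipy.sparse.linalg import aslinearoperator
>>> import scipy.linalg.interpolative as sli
>>> d = np.full(1000, 0.5); d[0] = 5.0
>>> Dop = aslinearoperator(sp.diags(d))         # diagonal operator, norm 5
>>> s_est = sli.estimate_spectral_norm(Dop)

``s_est`` should come out close to 5.0, the largest diagonal entry.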
The same algorithm can also estimate the spectral norm of the difference of two matrices ``A1`` and ``A2`` as follows: >>> diff = sli.estimate_spectral_norm_diff(A1, A2) This is often useful for checking the accuracy of a matrix approximation. Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank of a matrix as well. This can be done with either: >>> k = sli.estimate_rank(A, eps) or: >>> k = sli.estimate_rank(L, eps) depending on the representation. The parameter ``eps`` controls the definition of the numerical rank. Finally, the random number generation required for all randomized routines can be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed values to their original values, use: >>> sli.seed('default') To specify the seed values, use: >>> sli.seed(s) where ``s`` must be an integer or array of 55 floats. If an integer, the array of floats is obtained by using ``numpy.random.rand`` with the given integer seed. To simply generate some random numbers, type: >>> sli.rand(n) where ``n`` is the number of random numbers to generate. Remarks ------- The above functions all automatically detect the appropriate interface and work with both real and complex data types, passing input arguments to the proper backend routine. """ import scipy.linalg._interpolative_backend as _backend import numpy as np import sys __all__ = [ 'estimate_rank', 'estimate_spectral_norm', 'estimate_spectral_norm_diff', 'id_to_svd', 'interp_decomp', 'rand', 'reconstruct_interp_matrix', 'reconstruct_matrix_from_id', 'reconstruct_skel_matrix', 'seed', 'svd', ] _DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)") _TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)") _32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems " "with complex128 is buggy") _IS_32BIT = (sys.maxsize < 2**32) def _is_real(A): try: if A.dtype == np.complex128: return False elif A.dtype == np.float64: return True else: raise _DTYPE_ERROR except AttributeError as e: raise _TYPE_ERROR from e def seed(seed=None): """ Seed the internal random number generator used in this ID package. The generator is a lagged Fibonacci method with 55-element internal state. Parameters ---------- seed : int, sequence, 'default', optional If 'default', the random seed is reset to a default value. If `seed` is a sequence containing 55 floating-point numbers in range [0,1], these are used to set the internal state of the generator. If the value is an integer, the internal state is obtained from `numpy.random.RandomState` (MT19937) with the integer used as the initial seed. If `seed` is omitted (None), ``numpy.random.rand`` is used to initialize the generator. """ # For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`, # and :func:`_backend.id_srando`. if isinstance(seed, str) and seed == 'default': _backend.id_srando() elif hasattr(seed, '__len__'): state = np.asfortranarray(seed, dtype=float) if state.shape != (55,): raise ValueError("invalid input size") elif state.min() < 0 or state.max() > 1: raise ValueError("values not in range [0,1]") _backend.id_srandi(state) elif seed is None: _backend.id_srandi(np.random.rand(55)) else: rnd = np.random.RandomState(seed) _backend.id_srandi(rnd.rand(55)) def rand(*shape): """ Generate standard uniform pseudorandom numbers via a very efficient lagged Fibonacci method. This routine is used for all random number generation in this package and can affect ID and SVD results. 
Parameters ---------- *shape Shape of output array """ # For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`. return _backend.id_srand(np.prod(shape)).reshape(shape) def interp_decomp(A, eps_or_k, rand=True): """ Compute ID of a matrix. An ID of a matrix `A` is a factorization defined by a rank `k`, a column index array `idx`, and interpolation coefficients `proj` such that:: numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]] The original matrix can then be reconstructed as:: numpy.hstack([A[:,idx[:k]], numpy.dot(A[:,idx[:k]], proj)] )[:,numpy.argsort(idx)] or via the routine :func:`reconstruct_matrix_from_id`. This can equivalently be written as:: numpy.dot(A[:,idx[:k]], numpy.hstack([numpy.eye(k), proj]) )[:,np.argsort(idx)] in terms of the skeleton and interpolation matrices:: B = A[:,idx[:k]] and:: P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)] respectively. See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. The ID can be computed to any relative precision or rank (depending on the value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then this function has the output signature:: k, idx, proj = interp_decomp(A, eps_or_k) Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output signature is:: idx, proj = interp_decomp(A, eps_or_k) .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`, :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`, :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`, :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`, :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`, :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec` Matrix to be factored eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- k : int Rank required to achieve specified relative precision if `eps_or_k < 1`. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. 
""" from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: k, idx, proj = _backend.iddp_aid(eps, A) else: if _IS_32BIT: raise _32BIT_ERROR k, idx, proj = _backend.idzp_aid(eps, A) else: if real: k, idx, proj = _backend.iddp_id(eps, A) else: k, idx, proj = _backend.idzp_id(eps, A) return k, idx - 1, proj else: k = int(eps_or_k) if rand: if real: idx, proj = _backend.iddr_aid(A, k) else: if _IS_32BIT: raise _32BIT_ERROR idx, proj = _backend.idzr_aid(A, k) else: if real: idx, proj = _backend.iddr_id(A, k) else: idx, proj = _backend.idzr_id(A, k) return idx - 1, proj elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if eps_or_k < 1: eps = eps_or_k if real: k, idx, proj = _backend.iddp_rid(eps, m, n, matveca) else: if _IS_32BIT: raise _32BIT_ERROR k, idx, proj = _backend.idzp_rid(eps, m, n, matveca) return k, idx - 1, proj else: k = int(eps_or_k) if real: idx, proj = _backend.iddr_rid(m, n, matveca, k) else: if _IS_32BIT: raise _32BIT_ERROR idx, proj = _backend.idzr_rid(m, n, matveca, k) return idx - 1, proj else: raise _TYPE_ERROR def reconstruct_matrix_from_id(B, idx, proj): """ Reconstruct matrix from its ID. A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, can be reconstructed as:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_interp_matrix` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_reconid` and :func:`_backend.idz_reconid`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Reconstructed matrix. """ if _is_real(B): return _backend.idd_reconid(B, idx + 1, proj) else: return _backend.idz_reconid(B, idx + 1, proj) def reconstruct_interp_matrix(idx, proj): """ Reconstruct interpolation matrix from ID. The interpolation matrix can be reconstructed from the ID indices and coefficients `idx` and `proj`, respectively, as:: P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)] The original matrix can then be reconstructed from its skeleton matrix `B` via:: numpy.dot(B, P) See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_skel_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_reconint` and :func:`_backend.idz_reconint`. Parameters ---------- idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- :class:`numpy.ndarray` Interpolation matrix. """ if _is_real(proj): return _backend.idd_reconint(idx + 1, proj) else: return _backend.idz_reconint(idx + 1, proj) def reconstruct_skel_matrix(A, k, idx): """ Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix `A` and its ID rank and indices `k` and `idx`, respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func:`reconstruct_matrix_from_id` and :func:`reconstruct_interp_matrix`. .. This function automatically detects the matrix data type and calls the appropriate backend. 
For details, see :func:`_backend.idd_copycols` and :func:`_backend.idz_copycols`. Parameters ---------- A : :class:`numpy.ndarray` Original matrix. k : int Rank of ID. idx : :class:`numpy.ndarray` Column index array. Returns ------- :class:`numpy.ndarray` Skeleton matrix. """ if _is_real(A): return _backend.idd_copycols(A, k, idx + 1) else: return _backend.idz_copycols(A, k, idx + 1) def id_to_svd(B, idx, proj): """ Convert ID to SVD. The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and coefficients `idx` and `proj`, respectively, is:: U, S, V = id_to_svd(B, idx, proj) A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) See also :func:`svd`. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_id2svd` and :func:`_backend.idz_id2svd`. Parameters ---------- B : :class:`numpy.ndarray` Skeleton matrix. idx : :class:`numpy.ndarray` Column index array. proj : :class:`numpy.ndarray` Interpolation coefficients. Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ if _is_real(B): U, V, S = _backend.idd_id2svd(B, idx + 1, proj) else: U, V, S = _backend.idz_id2svd(B, idx + 1, proj) return U, S, V def estimate_spectral_norm(A, its=20): """ Estimate spectral norm of a matrix by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_snorm` and :func:`_backend.idz_snorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) m, n = A.shape matvec = lambda x: A. matvec(x) matveca = lambda x: A.rmatvec(x) if _is_real(A): return _backend.idd_snorm(m, n, matveca, matvec, its=its) else: return _backend.idz_snorm(m, n, matveca, matvec, its=its) def estimate_spectral_norm_diff(A, B, its=20): """ Estimate spectral norm of the difference of two matrices by the randomized power method. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and :func:`_backend.idz_diffsnorm`. Parameters ---------- A : :class:`scipy.sparse.linalg.LinearOperator` First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). B : :class:`scipy.sparse.linalg.LinearOperator` Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). its : int, optional Number of power method iterations. Returns ------- float Spectral norm estimate of matrix difference. """ from scipy.sparse.linalg import aslinearoperator A = aslinearoperator(A) B = aslinearoperator(B) m, n = A.shape matvec1 = lambda x: A. matvec(x) matveca1 = lambda x: A.rmatvec(x) matvec2 = lambda x: B. 
matvec(x) matveca2 = lambda x: B.rmatvec(x) if _is_real(A): return _backend.idd_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) else: return _backend.idz_diffsnorm( m, n, matveca1, matveca2, matvec1, matvec2, its=its) def svd(A, eps_or_k, rand=True): """ Compute SVD of a matrix via an ID. An SVD of a matrix `A` is a factorization:: A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T)) where `U` and `V` have orthonormal columns and `S` is nonnegative. The SVD can be computed to any relative precision or rank (depending on the value of `eps_or_k`). See also :func:`interp_decomp` and :func:`id_to_svd`. .. This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`, :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`, :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`, :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`, :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`, :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix to be factored, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint). eps_or_k : float or int Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of approximation. rand : bool, optional Whether to use random sampling if `A` is of type :class:`numpy.ndarray` (randomized algorithms are always used if `A` is of type :class:`scipy.sparse.linalg.LinearOperator`). Returns ------- U : :class:`numpy.ndarray` Left singular vectors. S : :class:`numpy.ndarray` Singular values. V : :class:`numpy.ndarray` Right singular vectors. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if eps_or_k < 1: eps = eps_or_k if rand: if real: U, V, S = _backend.iddp_asvd(eps, A) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzp_asvd(eps, A) else: if real: U, V, S = _backend.iddp_svd(eps, A) else: U, V, S = _backend.idzp_svd(eps, A) else: k = int(eps_or_k) if k > min(A.shape): raise ValueError("Approximation rank %s exceeds min(A.shape) = " " %s " % (k, min(A.shape))) if rand: if real: U, V, S = _backend.iddr_asvd(A, k) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzr_asvd(A, k) else: if real: U, V, S = _backend.iddr_svd(A, k) else: U, V, S = _backend.idzr_svd(A, k) elif isinstance(A, LinearOperator): m, n = A.shape matvec = lambda x: A.matvec(x) matveca = lambda x: A.rmatvec(x) if eps_or_k < 1: eps = eps_or_k if real: U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec) else: k = int(eps_or_k) if real: U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k) else: if _IS_32BIT: raise _32BIT_ERROR U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k) else: raise _TYPE_ERROR return U, S, V def estimate_rank(A, eps): """ Estimate matrix rank to a specified relative precision using randomized methods. The matrix `A` can be given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used for each case. If `A` is of type :class:`numpy.ndarray`, then the output rank is typically about 8 higher than the actual numerical rank. .. 
This function automatically detects the form of the input parameters and passes them to the appropriate backend. For details, see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`, :func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`. Parameters ---------- A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` Matrix whose rank is to be estimated, given as either a :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator` with the `rmatvec` method (to apply the matrix adjoint). eps : float Relative error for numerical rank definition. Returns ------- int Estimated matrix rank. """ from scipy.sparse.linalg import LinearOperator real = _is_real(A) if isinstance(A, np.ndarray): if real: rank = _backend.idd_estrank(eps, A) else: rank = _backend.idz_estrank(eps, A) if rank == 0: # special return value for nearly full rank rank = min(A.shape) return rank elif isinstance(A, LinearOperator): m, n = A.shape matveca = A.rmatvec if real: return _backend.idd_findrank(eps, m, n, matveca) else: return _backend.idz_findrank(eps, m, n, matveca) else: raise _TYPE_ERROR
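To close out this module, a minimal rank-estimation sketch (the synthetic rank-30 matrix and the tolerance are illustrative assumptions); as noted in the docstring, the dense-array code path typically overshoots the true numerical rank by a small constant:

# Hypothetical example, not part of the library source.
import numpy as np
import scipy.linalg.interpolative as sli

rng = np.random.RandomState(0)
A_lowrank = np.asfortranarray(np.dot(rng.randn(400, 30), rng.randn(30, 400)))  # exact rank 30
k_est = sli.estimate_rank(A_lowrank, 1e-10)    # should land near 30 (often a few higher)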
get
Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
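A minimal usage sketch for the lookup described above (the provider module path, the logical name, and the ARM resource ID string are illustrative assumptions, not values taken from this file):

# Hypothetical lookup of an already-deployed appliance by its ARM resource ID.
import pulumi
import pulumi_azure_native as azure_native

nva_id = ("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg"
          "/providers/Microsoft.Network/networkVirtualAppliances/example-nva")

existing = azure_native.network.v20201101.NetworkVirtualAppliance.get(
    "existing-nva",   # logical resource name in this Pulumi program
    id=nva_id,
)

pulumi.export("nvaLocation", existing.location)

The returned object exposes the resource's outputs (location, SKU, provisioning state, and so on) as read-only properties.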
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['NetworkVirtualApplianceArgs', 'NetworkVirtualAppliance'] @pulumi.input_type class NetworkVirtualApplianceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, cloud_init_configuration: Optional[pulumi.Input[str]] = None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None, location: Optional[pulumi.Input[str]] = None, network_virtual_appliance_name: Optional[pulumi.Input[str]] = None, nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_appliance_asn: Optional[pulumi.Input[float]] = None, virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None): """ The set of arguments for constructing a NetworkVirtualAppliance resource. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs. :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text. :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance. :param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN. :param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed. 
""" pulumi.set(__self__, "resource_group_name", resource_group_name) if boot_strap_configuration_blobs is not None: pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs) if cloud_init_configuration is not None: pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration) if cloud_init_configuration_blobs is not None: pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs) if id is not None: pulumi.set(__self__, "id", id) if identity is not None: pulumi.set(__self__, "identity", identity) if location is not None: pulumi.set(__self__, "location", location) if network_virtual_appliance_name is not None: pulumi.set(__self__, "network_virtual_appliance_name", network_virtual_appliance_name) if nva_sku is not None: pulumi.set(__self__, "nva_sku", nva_sku) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_appliance_asn is not None: pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn) if virtual_hub is not None: pulumi.set(__self__, "virtual_hub", virtual_hub) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="bootStrapConfigurationBlobs") def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ BootStrapConfigurationBlobs storage URLs. """ return pulumi.get(self, "boot_strap_configuration_blobs") @boot_strap_configuration_blobs.setter def boot_strap_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "boot_strap_configuration_blobs", value) @property @pulumi.getter(name="cloudInitConfiguration") def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]: """ CloudInitConfiguration string in plain text. """ return pulumi.get(self, "cloud_init_configuration") @cloud_init_configuration.setter def cloud_init_configuration(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cloud_init_configuration", value) @property @pulumi.getter(name="cloudInitConfigurationBlobs") def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CloudInitConfigurationBlob storage URLs. """ return pulumi.get(self, "cloud_init_configuration_blobs") @cloud_init_configuration_blobs.setter def cloud_init_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "cloud_init_configuration_blobs", value) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: """ Resource ID. """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]: """ The service principal that has read access to cloud-init and config blob. """ return pulumi.get(self, "identity") @identity.setter def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]): pulumi.set(self, "identity", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location. 
""" return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="networkVirtualApplianceName") def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]: """ The name of Network Virtual Appliance. """ return pulumi.get(self, "network_virtual_appliance_name") @network_virtual_appliance_name.setter def network_virtual_appliance_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network_virtual_appliance_name", value) @property @pulumi.getter(name="nvaSku") def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]: """ Network Virtual Appliance SKU. """ return pulumi.get(self, "nva_sku") @nva_sku.setter def nva_sku(self, value: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]): pulumi.set(self, "nva_sku", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="virtualApplianceAsn") def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]: """ VirtualAppliance ASN. """ return pulumi.get(self, "virtual_appliance_asn") @virtual_appliance_asn.setter def virtual_appliance_asn(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "virtual_appliance_asn", value) @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]: """ The Virtual Hub where Network Virtual Appliance is being deployed. """ return pulumi.get(self, "virtual_hub") @virtual_hub.setter def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]): pulumi.set(self, "virtual_hub", value) class NetworkVirtualAppliance(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, cloud_init_configuration: Optional[pulumi.Input[str]] = None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None, location: Optional[pulumi.Input[str]] = None, network_virtual_appliance_name: Optional[pulumi.Input[str]] = None, nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_appliance_asn: Optional[pulumi.Input[float]] = None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, __props__=None): """ NetworkVirtualAppliance Resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs. :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text. :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs. :param pulumi.Input[str] id: Resource ID. 
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance. :param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN. :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed. """ ... @overload def __init__(__self__, resource_name: str, args: NetworkVirtualApplianceArgs, opts: Optional[pulumi.ResourceOptions] = None): """ NetworkVirtualAppliance Resource. :param str resource_name: The name of the resource. :param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(NetworkVirtualApplianceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, cloud_init_configuration: Optional[pulumi.Input[str]] = None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None, location: Optional[pulumi.Input[str]] = None, network_virtual_appliance_name: Optional[pulumi.Input[str]] = None, nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_appliance_asn: Optional[pulumi.Input[float]] = None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs) __props__.__dict__["boot_strap_configuration_blobs"] = boot_strap_configuration_blobs __props__.__dict__["cloud_init_configuration"] = cloud_init_configuration __props__.__dict__["cloud_init_configuration_blobs"] = cloud_init_configuration_blobs __props__.__dict__["id"] = id __props__.__dict__["identity"] = identity __props__.__dict__["location"] = location __props__.__dict__["network_virtual_appliance_name"] = network_virtual_appliance_name __props__.__dict__["nva_sku"] = nva_sku if resource_group_name is None and not opts.urn: raise TypeError("Missing required 
property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["tags"] = tags __props__.__dict__["virtual_appliance_asn"] = virtual_appliance_asn __props__.__dict__["virtual_hub"] = virtual_hub __props__.__dict__["address_prefix"] = None __props__.__dict__["etag"] = None __props__.__dict__["inbound_security_rules"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_appliance_nics"] = None __props__.__dict__["virtual_appliance_sites"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210301:NetworkVirtualAppliance")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(NetworkVirtualAppliance, __self__).__init__( 'azure-native:network/v20201101:NetworkVirtualAppliance', resource_name, __props__, opts) # MASKED: get function (lines 329-362) @property @pulumi.getter(name="addressPrefix") def address_prefix(self) -> pulumi.Output[str]: """ Address Prefix. """ return pulumi.get(self, "address_prefix") @property @pulumi.getter(name="bootStrapConfigurationBlobs") def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]: """ BootStrapConfigurationBlobs storage URLs. """ return pulumi.get(self, "boot_strap_configuration_blobs") @property @pulumi.getter(name="cloudInitConfiguration") def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]: """ CloudInitConfiguration string in plain text. """ return pulumi.get(self, "cloud_init_configuration") @property @pulumi.getter(name="cloudInitConfigurationBlobs") def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]: """ CloudInitConfigurationBlob storage URLs. """ return pulumi.get(self, "cloud_init_configuration_blobs") @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ A unique read-only string that changes whenever the resource is updated. 
""" return pulumi.get(self, "etag") @property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]: """ The service principal that has read access to cloud-init and config blob. """ return pulumi.get(self, "identity") @property @pulumi.getter(name="inboundSecurityRules") def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]: """ List of references to InboundSecurityRules. """ return pulumi.get(self, "inbound_security_rules") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="nvaSku") def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]: """ Network Virtual Appliance SKU. """ return pulumi.get(self, "nva_sku") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state of the resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. """ return pulumi.get(self, "type") @property @pulumi.getter(name="virtualApplianceAsn") def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]: """ VirtualAppliance ASN. """ return pulumi.get(self, "virtual_appliance_asn") @property @pulumi.getter(name="virtualApplianceNics") def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]: """ List of Virtual Appliance Network Interfaces. """ return pulumi.get(self, "virtual_appliance_nics") @property @pulumi.getter(name="virtualApplianceSites") def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]: """ List of references to VirtualApplianceSite. """ return pulumi.get(self, "virtual_appliance_sites") @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ The Virtual Hub where Network Virtual Appliance is being deployed. """ return pulumi.get(self, "virtual_hub")
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkVirtualAppliance': """ Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs) __props__.__dict__["address_prefix"] = None __props__.__dict__["boot_strap_configuration_blobs"] = None __props__.__dict__["cloud_init_configuration"] = None __props__.__dict__["cloud_init_configuration_blobs"] = None __props__.__dict__["etag"] = None __props__.__dict__["identity"] = None __props__.__dict__["inbound_security_rules"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["nva_sku"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_appliance_asn"] = None __props__.__dict__["virtual_appliance_nics"] = None __props__.__dict__["virtual_appliance_sites"] = None __props__.__dict__["virtual_hub"] = None return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__)
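The `get` method above rehydrates an existing NetworkVirtualAppliance purely from provider state: every property on the args object is reset to `None` and is filled in by the engine once the resource is looked up by its provider ID. A minimal usage sketch follows; the import path and the resource ID value are illustrative assumptions, not values taken from this SDK file.

import pulumi
# Assumed import path for the generated SDK shown above.
from pulumi_azure_native.network.v20201101 import NetworkVirtualAppliance

# Hypothetical ARM ID of an appliance that already exists in Azure.
existing_id = (
    "/subscriptions/00000000-0000-0000-0000-000000000000"
    "/resourceGroups/example-rg/providers/Microsoft.Network"
    "/networkVirtualAppliances/example-nva"
)

# Look up the existing resource; no new Azure resource is created.
imported = NetworkVirtualAppliance.get("imported-nva", id=existing_id)

# Output properties become available once the lookup resolves.
pulumi.export("provisioningState", imported.provisioning_state)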
329
362
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['NetworkVirtualApplianceArgs', 'NetworkVirtualAppliance'] @pulumi.input_type class NetworkVirtualApplianceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, cloud_init_configuration: Optional[pulumi.Input[str]] = None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']] = None, location: Optional[pulumi.Input[str]] = None, network_virtual_appliance_name: Optional[pulumi.Input[str]] = None, nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_appliance_asn: Optional[pulumi.Input[float]] = None, virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None): """ The set of arguments for constructing a NetworkVirtualAppliance resource. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs. :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text. :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs. :param pulumi.Input[str] id: Resource ID. :param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance. :param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN. :param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed. 
""" pulumi.set(__self__, "resource_group_name", resource_group_name) if boot_strap_configuration_blobs is not None: pulumi.set(__self__, "boot_strap_configuration_blobs", boot_strap_configuration_blobs) if cloud_init_configuration is not None: pulumi.set(__self__, "cloud_init_configuration", cloud_init_configuration) if cloud_init_configuration_blobs is not None: pulumi.set(__self__, "cloud_init_configuration_blobs", cloud_init_configuration_blobs) if id is not None: pulumi.set(__self__, "id", id) if identity is not None: pulumi.set(__self__, "identity", identity) if location is not None: pulumi.set(__self__, "location", location) if network_virtual_appliance_name is not None: pulumi.set(__self__, "network_virtual_appliance_name", network_virtual_appliance_name) if nva_sku is not None: pulumi.set(__self__, "nva_sku", nva_sku) if tags is not None: pulumi.set(__self__, "tags", tags) if virtual_appliance_asn is not None: pulumi.set(__self__, "virtual_appliance_asn", virtual_appliance_asn) if virtual_hub is not None: pulumi.set(__self__, "virtual_hub", virtual_hub) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="bootStrapConfigurationBlobs") def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ BootStrapConfigurationBlobs storage URLs. """ return pulumi.get(self, "boot_strap_configuration_blobs") @boot_strap_configuration_blobs.setter def boot_strap_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "boot_strap_configuration_blobs", value) @property @pulumi.getter(name="cloudInitConfiguration") def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]: """ CloudInitConfiguration string in plain text. """ return pulumi.get(self, "cloud_init_configuration") @cloud_init_configuration.setter def cloud_init_configuration(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "cloud_init_configuration", value) @property @pulumi.getter(name="cloudInitConfigurationBlobs") def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ CloudInitConfigurationBlob storage URLs. """ return pulumi.get(self, "cloud_init_configuration_blobs") @cloud_init_configuration_blobs.setter def cloud_init_configuration_blobs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "cloud_init_configuration_blobs", value) @property @pulumi.getter def id(self) -> Optional[pulumi.Input[str]]: """ Resource ID. """ return pulumi.get(self, "id") @id.setter def id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "id", value) @property @pulumi.getter def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]: """ The service principal that has read access to cloud-init and config blob. """ return pulumi.get(self, "identity") @identity.setter def identity(self, value: Optional[pulumi.Input['ManagedServiceIdentityArgs']]): pulumi.set(self, "identity", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location. 
""" return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="networkVirtualApplianceName") def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]: """ The name of Network Virtual Appliance. """ return pulumi.get(self, "network_virtual_appliance_name") @network_virtual_appliance_name.setter def network_virtual_appliance_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "network_virtual_appliance_name", value) @property @pulumi.getter(name="nvaSku") def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]: """ Network Virtual Appliance SKU. """ return pulumi.get(self, "nva_sku") @nva_sku.setter def nva_sku(self, value: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]): pulumi.set(self, "nva_sku", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="virtualApplianceAsn") def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]: """ VirtualAppliance ASN. """ return pulumi.get(self, "virtual_appliance_asn") @virtual_appliance_asn.setter def virtual_appliance_asn(self, value: Optional[pulumi.Input[float]]): pulumi.set(self, "virtual_appliance_asn", value) @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]: """ The Virtual Hub where Network Virtual Appliance is being deployed. """ return pulumi.get(self, "virtual_hub") @virtual_hub.setter def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]): pulumi.set(self, "virtual_hub", value) class NetworkVirtualAppliance(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, cloud_init_configuration: Optional[pulumi.Input[str]] = None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None, location: Optional[pulumi.Input[str]] = None, network_virtual_appliance_name: Optional[pulumi.Input[str]] = None, nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_appliance_asn: Optional[pulumi.Input[float]] = None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, __props__=None): """ NetworkVirtualAppliance Resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs. :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text. :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs. :param pulumi.Input[str] id: Resource ID. 
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance. :param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU. :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN. :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed. """ ... @overload def __init__(__self__, resource_name: str, args: NetworkVirtualApplianceArgs, opts: Optional[pulumi.ResourceOptions] = None): """ NetworkVirtualAppliance Resource. :param str resource_name: The name of the resource. :param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(NetworkVirtualApplianceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, cloud_init_configuration: Optional[pulumi.Input[str]] = None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None, location: Optional[pulumi.Input[str]] = None, network_virtual_appliance_name: Optional[pulumi.Input[str]] = None, nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, virtual_appliance_asn: Optional[pulumi.Input[float]] = None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs) __props__.__dict__["boot_strap_configuration_blobs"] = boot_strap_configuration_blobs __props__.__dict__["cloud_init_configuration"] = cloud_init_configuration __props__.__dict__["cloud_init_configuration_blobs"] = cloud_init_configuration_blobs __props__.__dict__["id"] = id __props__.__dict__["identity"] = identity __props__.__dict__["location"] = location __props__.__dict__["network_virtual_appliance_name"] = network_virtual_appliance_name __props__.__dict__["nva_sku"] = nva_sku if resource_group_name is None and not opts.urn: raise TypeError("Missing required 
property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["tags"] = tags __props__.__dict__["virtual_appliance_asn"] = virtual_appliance_asn __props__.__dict__["virtual_hub"] = virtual_hub __props__.__dict__["address_prefix"] = None __props__.__dict__["etag"] = None __props__.__dict__["inbound_security_rules"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_appliance_nics"] = None __props__.__dict__["virtual_appliance_sites"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20201101:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200701:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20200801:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210201:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-native:network/v20210301:NetworkVirtualAppliance"), pulumi.Alias(type_="azure-nextgen:network/v20210301:NetworkVirtualAppliance")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(NetworkVirtualAppliance, __self__).__init__( 'azure-native:network/v20201101:NetworkVirtualAppliance', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkVirtualAppliance': """ Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs) __props__.__dict__["address_prefix"] = None __props__.__dict__["boot_strap_configuration_blobs"] = None __props__.__dict__["cloud_init_configuration"] = None __props__.__dict__["cloud_init_configuration_blobs"] = None __props__.__dict__["etag"] = None __props__.__dict__["identity"] = None __props__.__dict__["inbound_security_rules"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["nva_sku"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None __props__.__dict__["virtual_appliance_asn"] = None __props__.__dict__["virtual_appliance_nics"] = None __props__.__dict__["virtual_appliance_sites"] = None __props__.__dict__["virtual_hub"] = None return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="addressPrefix") def address_prefix(self) -> pulumi.Output[str]: """ Address Prefix. """ return pulumi.get(self, "address_prefix") @property @pulumi.getter(name="bootStrapConfigurationBlobs") def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]: """ BootStrapConfigurationBlobs storage URLs. """ return pulumi.get(self, "boot_strap_configuration_blobs") @property @pulumi.getter(name="cloudInitConfiguration") def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]: """ CloudInitConfiguration string in plain text. """ return pulumi.get(self, "cloud_init_configuration") @property @pulumi.getter(name="cloudInitConfigurationBlobs") def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]: """ CloudInitConfigurationBlob storage URLs. """ return pulumi.get(self, "cloud_init_configuration_blobs") @property @pulumi.getter def etag(self) -> pulumi.Output[str]: """ A unique read-only string that changes whenever the resource is updated. """ return pulumi.get(self, "etag") @property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]: """ The service principal that has read access to cloud-init and config blob. """ return pulumi.get(self, "identity") @property @pulumi.getter(name="inboundSecurityRules") def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]: """ List of references to InboundSecurityRules. """ return pulumi.get(self, "inbound_security_rules") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name. """ return pulumi.get(self, "name") @property @pulumi.getter(name="nvaSku") def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]: """ Network Virtual Appliance SKU. """ return pulumi.get(self, "nva_sku") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioning state of the resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type. 
""" return pulumi.get(self, "type") @property @pulumi.getter(name="virtualApplianceAsn") def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]: """ VirtualAppliance ASN. """ return pulumi.get(self, "virtual_appliance_asn") @property @pulumi.getter(name="virtualApplianceNics") def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]: """ List of Virtual Appliance Network Interfaces. """ return pulumi.get(self, "virtual_appliance_nics") @property @pulumi.getter(name="virtualApplianceSites") def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]: """ List of references to VirtualApplianceSite. """ return pulumi.get(self, "virtual_appliance_sites") @property @pulumi.getter(name="virtualHub") def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]: """ The Virtual Hub where Network Virtual Appliance is being deployed. """ return pulumi.get(self, "virtual_hub")
__init__
Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type.
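The constructor documented here is the masked `__init__` of `EuclideanMesh` in the file that follows; its body is intentionally hidden. Purely to illustrate the validation contract the docstring describes, the hypothetical helper below performs the same checks; the function name and its structure are assumptions, not the masked code.

from jigsawpy import jigsaw_msh_t
from pyproj import CRS

def _validate_euclidean_mesh(mesh) -> None:
    """Hypothetical helper mirroring the checks described by the docstring."""
    if not isinstance(mesh, jigsaw_msh_t):
        raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
                        f'not type {type(mesh)}.')
    if mesh.mshID != 'euclidean-mesh':
        raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, '
                         "but expected mshID='euclidean-mesh'.")
    crs = getattr(mesh, 'crs', None)
    if crs is not None and not isinstance(crs, CRS):
        raise ValueError(f'crs property must be of type {CRS}, '
                         f'not type {type(crs)}.')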
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ # MASKED: __init__ function (lines 81-115) def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. 
""" path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
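                    # The next few lines normalize the interior ring's
                    # orientation: a negative signed area means the ring is
                    # stored clockwise, so both endpoint lists are reversed
                    # before the node indices are converted to node IDs and
                    # the ring is closed by re-appending its first node.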
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
    def __init__(self, mesh: jigsaw_msh_t) -> None:
        """Initialize Euclidean mesh object.

        Parameters
        ----------
        mesh : jigsaw_msh_t
            The underlying jigsaw_msh_t object to hold onto mesh data.

        Raises
        ------
        TypeError
            If the input mesh is not of `jigsaw_msh_t` type.
        ValueError
            If the input mesh's `mshID` is not equal to ``euclidean-mesh``,
            or if the input mesh has a `crs` property that is not of `CRS`
            type.
        """

        if not isinstance(mesh, jigsaw_msh_t):
            raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
                            f'not type {type(mesh)}.')
        if mesh.mshID != 'euclidean-mesh':
            raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, '
                             "but expected 'euclidean-mesh'.")

        if not hasattr(mesh, 'crs'):
            warnings.warn('Input mesh has no CRS information.')
            mesh.crs = None
        else:
            if not isinstance(mesh.crs, CRS):
                raise ValueError(f'crs property must be of type {CRS}, not '
                                 f'type {type(mesh.crs)}.')

        self._hull = None
        self._nodes = None
        self._elements = None
        self._msh_t = mesh
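# ---------------------------------------------------------------------------
# Minimal construction sketch for the validation performed in the
# ``__init__`` above.  The jigsaw_msh_t structured-array layout (VERT2_t /
# TRIA3_t tuples) and the top-level ``from ocsmesh import Mesh`` import path
# are assumed from typical usage rather than guaranteed by this module, so
# treat this as illustrative only.
import numpy as np
from jigsawpy import jigsaw_msh_t
from ocsmesh import Mesh

msh = jigsaw_msh_t()
msh.mshID = 'euclidean-mesh'   # any other mshID raises ValueError
msh.ndims = +2                 # EuclideanMesh2D additionally requires ndims=2
msh.vert2 = np.array(
    [((0.0, 0.0), 0), ((1.0, 0.0), 0), ((0.0, 1.0), 0)],
    dtype=jigsaw_msh_t.VERT2_t)
msh.tria3 = np.array([((0, 1, 2), 0)], dtype=jigsaw_msh_t.TRIA3_t)

mesh = Mesh(msh)               # dispatches to EuclideanMesh2D; warns about CRS
print(type(mesh).__name__, len(mesh.nodes.id()))   # EuclideanMesh2D 3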
81
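# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): how the (mask, values) pair
# returned by _mesh_interpolate_worker above is folded back into the mesh
# node values, mirroring what EuclideanMesh2D.interpolate() does with each
# worker result.  The mask and values below are synthetic stand-ins.
import numpy as np

node_values = np.full(5, np.nan)                   # e.g. msh_t.value.flatten()
mask = np.array([True, False, True, True, False])  # nodes covered by a raster
interp_values = np.array([-3.2, -1.0, 0.4])        # one value per masked node

node_values[mask] = interp_values                  # update only covered nodes
print(node_values)                                 # [-3.2  nan -1.   0.4  nan]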
115
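# ---------------------------------------------------------------------------
# Standalone sketch of the quad4 -> tria3 split used by the
# Hull.triangulation() and Elements.triangulation() helpers in this module:
# each quadrangle (q0, q1, q2, q3) is cut along its q1-q3 diagonal into the
# triangles (q0, q1, q3) and (q1, q2, q3).  The coordinates are made up.
import numpy as np
from matplotlib.tri import Triangulation

coord = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])  # unit square
quad4_index = np.array([[0, 1, 2, 3]])                              # one quad

triangles = []
for quad in quad4_index:
    triangles.append([quad[0], quad[1], quad[3]])
    triangles.append([quad[1], quad[2], quad[3]])

tri = Triangulation(coord[:, 0], coord[:, 1], triangles)
print(tri.triangles)   # [[0 1 3]
                       #  [1 2 3]]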
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
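                # The signed-area check below reverses the ring vertex order
                # when the area is negative, apparently to enforce a
                # consistent winding for interior rings before the edge
                # indices are mapped back to node IDs and the ring is closed.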
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
write
Export the mesh object to the disk

Parameters
----------
path : path-like
    Path to which the mesh should be exported.
overwrite : bool, default=False
    Whether to overwrite if a file already exists in `path`.
format : { 'grd', '2dm', 'msh', 'vtk' }, default='grd'
    Format of the export: GRD, SMS-2DM, jigsaw msh, or VTK.

Returns
-------
None

Raises
------
ValueError
    If the specified export format is **not** supported.
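
Notes
-----
A minimal, illustrative call sequence (the file names are hypothetical)::

    mesh = Mesh.open('hgrid.gr3')
    mesh.write('hgrid.2dm', overwrite=True, format='2dm')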
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. 
""" if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh # MASKED: write function (lines 117-164) @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
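                # The signed-area check below reverses the ring vertex order
                # when the area is negative, apparently to enforce a
                # consistent winding for interior rings before the edge
                # indices are mapped back to node IDs and the ring is closed.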
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.')
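A hedged usage sketch for the `write` implementation above (assumes an already constructed `EuclideanMesh2D` instance named `mesh`; the output file names are placeholders, not taken from this record):

mesh.write("hgrid.gr3", overwrite=True)               # GRD (default format)
mesh.write("mesh.2dm", format="2dm", overwrite=True)  # SMS-2DM
mesh.write("mesh.msh", format="msh", overwrite=True)  # jigsaw msh
mesh.write("mesh.vtk", format="vtk", overwrite=True)  # VTK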
117
164
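A minimal, self-contained sketch (synthetic grid and query points, not part of this record) of the masked-value handling in the `_mesh_interpolate_worker` function shown above: the DEM mask is interpolated alongside the DEM values, and any query node whose interpolated mask is positive is treated as "contaminated" and dropped. The real worker additionally flips the y axis (`np.flip(yi)`, `np.flipud(zi)`) because raster rows run top to bottom.

import numpy as np
from scipy.interpolate import RegularGridInterpolator

xi = np.linspace(0.0, 1.0, 5)                      # raster x coordinates
yi = np.linspace(0.0, 1.0, 5)                      # raster y coordinates
zi = np.ma.masked_array(np.arange(25.0).reshape(5, 5))
zi[2, 2] = np.ma.masked                            # pretend one nodata cell

pts = np.array([[0.1, 0.1], [0.5, 0.5]])           # mesh nodes to query

# interpolate the mask itself; > 0 flags nodes touching a masked cell
m_interp = RegularGridInterpolator((xi, yi), zi.mask.astype(float),
                                   method='linear')
contaminated = m_interp(pts) > 0

f = RegularGridInterpolator((xi, yi), zi.filled(np.nan), method='linear')
values = f(pts)[~contaminated]                     # keep only clean nodes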
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
__init__
Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``.
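A hedged sketch of constructing a tiny 2D `jigsaw_msh_t` and wrapping it as described in the docstring above. The structured dtypes `VERT2_t`/`TRIA3_t`, the writable `ndims` attribute, and the import path are assumptions based on jigsawpy/ocsmesh conventions; treat the snippet as illustrative only.

import numpy as np
from jigsawpy import jigsaw_msh_t
from pyproj import CRS
from ocsmesh.mesh.mesh import EuclideanMesh2D   # module path assumed

msh = jigsaw_msh_t()
msh.mshID = 'euclidean-mesh'
msh.ndims = +2                                  # assumed to be a plain attribute
msh.vert2 = np.array(                           # three nodes of one triangle
    [((0.0, 0.0), 0), ((1.0, 0.0), 0), ((0.0, 1.0), 0)],
    dtype=jigsaw_msh_t.VERT2_t)
msh.tria3 = np.array(
    [((0, 1, 2), 0)], dtype=jigsaw_msh_t.TRIA3_t)
msh.crs = CRS.from_epsg(4326)

mesh = EuclideanMesh2D(msh)                     # node values default to NaN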
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ # MASKED: __init__ function (lines 246-269) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
def __init__(self, mesh: jigsaw_msh_t) -> None:
    """Initialize Euclidean 2D mesh object.

    Parameters
    ----------
    mesh : jigsaw_msh_t
        The underlying jigsaw_msh_t object to hold onto mesh data.

    Raises
    ------
    ValueError
        If number of mesh dimensions is not equal to ``2``.
    """

    super().__init__(mesh)
    self._boundaries = None

    if mesh.ndims != +2:
        raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, '
                         "but expected ndims=2.")

    if len(self.msh_t.value) == 0:
        self.msh_t.value = np.array(
            np.full((self.vert2['coord'].shape[0], 1), np.nan))
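For reference, a minimal `jigsaw_msh_t` that satisfies the checks in this constructor (and in the parent `EuclideanMesh.__init__`) could be built as sketched below. The structured dtypes `VERT2_t`/`TRIA3_t` are assumed to be the ones shipped with `jigsawpy`, the import path for `EuclideanMesh2D` is assumed, and the single triangle is made up.

# Hedged sketch: the smallest jigsaw_msh_t the constructor above accepts.
import numpy as np
from jigsawpy import jigsaw_msh_t
from pyproj import CRS

from ocsmesh.mesh.mesh import EuclideanMesh2D   # import path assumed

msh = jigsaw_msh_t()
msh.mshID = 'euclidean-mesh'      # checked by EuclideanMesh.__init__
msh.ndims = +2                    # checked by EuclideanMesh2D.__init__
msh.crs = CRS.from_epsg(4326)     # must be a pyproj CRS if present
msh.vert2 = np.array(             # one right triangle: three nodes ...
    [((0., 0.), 0), ((1., 0.), 0), ((0., 1.), 0)],
    dtype=jigsaw_msh_t.VERT2_t)   # ... dtypes assumed from jigsawpy
msh.tria3 = np.array(
    [((0, 1, 2), 0)],
    dtype=jigsaw_msh_t.TRIA3_t)

mesh2d = EuclideanMesh2D(msh)
# jigsaw_msh_t() is assumed to start with an empty `value` array, so the
# constructor fills one NaN per node:
print(mesh2d.value.shape, bool(np.isnan(mesh2d.value).all()))   # (3, 1) True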
246
269
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
__call__
Calculates all the polygons of the mesh and extracts their rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly cleared, calls to this method can result in invalid return values.
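# --- Illustrative sketch (editor's addition, not part of the original source) ---
# The docstring above belongs to `Rings.__call__`, which is masked in the
# module listing that follows. The unmasked copy of the same file shown
# earlier in this document builds the ring dataframe from the mesh polygons;
# a sketch consistent with that logic is:
#
#     polys = utils.get_mesh_polygons(self.mesh.msh_t)
#     data = []
#     for bnd_id, poly in enumerate(polys):
#         data.append({'geometry': poly.exterior,
#                      'bnd_id': bnd_id, 'type': 'exterior'})
#         for interior in poly.interiors:
#             data.append({'geometry': interior,
#                          'bnd_id': bnd_id, 'type': 'interior'})
#     return gpd.GeoDataFrame(data, crs=self.mesh.crs)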
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh # MASKED: __call__ function (lines 714-752) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. 
""" return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. 
""" self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. 
Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. 
Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. 
If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. """ data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. 
ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. """ self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. 
""" self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Calculates all the polygons of the mesh and extracts its rings.

        Parameters
        ----------

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing all rings of the mesh hull polygon.
            The rings are in the form of `shapely.geometry.LinearRing`.

        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh is
        modified and the cache is not properly cleared, calls to this
        method can result in invalid return values.
        """

        polys = utils.get_mesh_polygons(self.mesh.msh_t)

        data = []
        bnd_id = 0
        for poly in polys:
            data.append({
                "geometry": poly.exterior,
                "bnd_id": bnd_id,
                "type": 'exterior'
            })
            for interior in poly.interiors:
                data.append({
                    "geometry": interior,
                    "bnd_id": bnd_id,
                    "type": 'interior'
                })
            bnd_id = bnd_id + 1

        return gpd.GeoDataFrame(data, crs=self.mesh.crs)
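# --- Illustrative sketch (added note, not part of the original source) ---
# Shows how the rings dataframe produced by the method above is typically
# consumed: the exterior ring becomes a polygon shell and interior rings
# with the same bnd_id become holes, mirroring what Hull.__call__ does.
# The two toy rings below are hypothetical stand-ins for real mesh output.
def _example_rings_to_polygon():
    import geopandas as gpd
    from shapely.geometry import LinearRing, Polygon

    rings = gpd.GeoDataFrame([
        {"geometry": LinearRing([(0, 0), (4, 0), (4, 4), (0, 4)]),
         "bnd_id": 0, "type": 'exterior'},
        {"geometry": LinearRing([(1, 1), (2, 1), (2, 2), (1, 2)]),
         "bnd_id": 0, "type": 'interior'},
    ])
    exterior = rings.loc[rings['type'] == 'exterior'].iloc[0].geometry
    holes = [row.geometry.coords for _, row in
             rings.loc[rings['type'] == 'interior'].iterrows()]
    return Polygon(exterior.coords, holes)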
714
752
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
__call__
Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh, one `shapely.geometry.LineString` per coordinate pair. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly cleared, calls to this method can return invalid values.
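A minimal usage sketch of the dataframe described above, assuming an already-loaded 2D mesh (the input file name is hypothetical, the top-level `Mesh` import path is assumed, and the `hull.edges` access path follows the `Hull`/`Edges` layout defined in this module):

from ocsmesh import Mesh                 # top-level re-export assumed

mesh = Mesh.open("my_mesh.grd")          # hypothetical GRD file on disk
edge_df = mesh.hull.edges()              # cached GeoDataFrame of boundary edges

# Each row holds one LineString edge, the id of the hull ring it came from,
# and whether that ring is an exterior or an interior (island) ring.
print(edge_df.columns.tolist())          # expected: ['geometry', 'bnd_id', 'type']
exterior_edges = edge_df[edge_df["type"] == "exterior"]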
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh # MASKED: __call__ function (lines 810-841) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. 
""" self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. 
Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. 
Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. 
If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. """ data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. 
ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. """ self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. 
""" self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Calculates all boundary edges for the mesh.

        Parameters
        ----------

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing all boundary edges of the mesh in
            the form of `shapely.geometry.LineString` for each
            coordinate couple.

        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh is
        modified and the cache is not properly cleared, calls to
        this method can return invalid values.
        """

        data = []
        for ring in self.mesh.hull.rings().itertuples():
            coords = ring.geometry.coords
            for i in range(1, len(coords)):
                data.append({
                    "geometry": LineString([coords[i-1], coords[i]]),
                    "bnd_id": ring.bnd_id,
                    "type": ring.type})

        return gpd.GeoDataFrame(data, crs=self.mesh.crs)
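A hedged, standalone illustration (hypothetical ring, not ocsmesh code) of what the implementation above does with each hull ring: the closed coordinate sequence is split into one two-point LineString per consecutive coordinate pair.

from shapely.geometry import LinearRing, LineString

ring = LinearRing([(0, 0), (1, 0), (1, 1), (0, 1)])
coords = list(ring.coords)                       # closed: last point repeats the first
edges = [LineString([coords[i - 1], coords[i]])  # same slicing as the loop above
         for i in range(1, len(coords))]
assert len(edges) == 4                           # one edge per ring segment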
810
841
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
__call__
Calculates all polygons of the mesh, including domain islands

Parameters
----------

Returns
-------
gpd.GeoDataFrame
    Dataframe containing all polygons of the mesh.

See Also
--------
implode()
    Dataframe with a single combined multipolygon.
multipolygon()
    `shapely` multipolygon shape of combined mesh polygons.

Notes
-----
The result of this method is cached, so that multiple calls to it
won't result in multiple calculations. If the mesh is modified and
the cache is not properly cleared, calls to this method can return
invalid values.
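A minimal sketch (hypothetical coordinates) of the assembly this docstring describes: for each boundary id, one exterior ring and any interior rings are combined into a single shapely Polygon row of the resulting GeoDataFrame.

import geopandas as gpd
from shapely.geometry import Polygon

exterior = [(0, 0), (4, 0), (4, 4), (0, 4)]
interiors = [[(1, 1), (2, 1), (2, 2), (1, 2)]]    # one island hole
hull_polys = gpd.GeoDataFrame(
    [{"geometry": Polygon(exterior, interiors), "bnd_id": 0}])
# in the real method the CRS of the parent mesh is attached as well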
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) # MASKED: __call__ function (lines 921-963) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. 
""" triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. 
Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. 
Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. """ data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. 
""" # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. """ self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in 
ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. 
# sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = "spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. 
""" coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
    @lru_cache(maxsize=1)
    def __call__(self) -> gpd.GeoDataFrame:
        """Calculates all polygons of the mesh including domain islands

        Parameters
        ----------

        Returns
        -------
        gpd.GeoDataFrame
            Dataframe containing all polygons of the mesh.

        See Also
        --------
        implode()
            Dataframe with a single combined multipolygon.
        multipolygon()
            `shapely` multipolygon shape of combined mesh polygons.

        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh is
        modified and the cache is not properly cleared, the calls to
        this method can result in invalid return values.
        """

        data = []
        for bnd_id in np.unique(self.rings()['bnd_id'].tolist()):
            exterior = self.rings().loc[
                (self.rings()['bnd_id'] == bnd_id)
                & (self.rings()['type'] == 'exterior')]
            interiors = self.rings().loc[
                (self.rings()['bnd_id'] == bnd_id)
                & (self.rings()['type'] == 'interior')]

            data.append({
                "geometry": Polygon(
                    exterior.iloc[0].geometry.coords,
                    [row.geometry.coords for _, row
                     in interiors.iterrows()]),
                "bnd_id": bnd_id
            })

        return gpd.GeoDataFrame(data, crs=self.mesh.crs)
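# A small usage sketch for the Hull helper whose __call__ is given above. It is
# built only from calls defined in this module; the import path is assumed from
# the package layout implied by the file's imports, and the mesh file name is
# hypothetical.
from pyproj import CRS
from ocsmesh.mesh.mesh import Mesh            # assumed module path for the factory above

mesh = Mesh.open('example_mesh.grd', crs=CRS.from_epsg(4326))  # hypothetical file

hull_df = mesh.hull()                # one row per mesh polygon: 'geometry', 'bnd_id'
outer = mesh.hull.exterior()         # polygons built from exterior rings only
holes = mesh.hull.interior()         # polygons built from interior rings only
domain = mesh.hull.multipolygon()    # all polygons combined into one MultiPolygon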
921
963
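# An end-to-end sketch tying together the pieces documented in this record:
# opening a mesh with the Mesh factory, interpolating raster values onto its
# nodes, auto-detecting open/land/island boundaries from node elevations, and
# exporting the result. Import paths follow the package layout implied by the
# imports at the top of the file, and all input/output paths are made up.
from pyproj import CRS
from ocsmesh.raster import Raster
from ocsmesh.mesh.mesh import Mesh            # assumed module path

mesh = Mesh.open('inputs/example_mesh.grd', crs=CRS.from_epsg(4326))

dem = Raster('inputs/example_dem.tif')        # hypothetical DEM file
mesh.interpolate(dem, method='linear', nprocs=1)

# Classify ocean (open), land and island boundaries from the interpolated values.
mesh.boundaries.auto_generate(threshold=0.0)

mesh.write('outputs/example_mesh_with_bnd.grd', overwrite=True, format='grd')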
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
__call__
Creates a mapping between element IDs and associated node IDs.

Parameters
----------

Returns
-------
dict
    Mapping between element IDs and associated node IDs.

Notes
-----
The result of this method is cached, so that multiple calls to it
won't result in multiple calculations. If the mesh is modified and
the cache is not properly cleared, calls to this method can result
in invalid return values.
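The docstring above describes the element-ID to node-ID mapping built by the masked `__call__`. The standalone sketch below mirrors the un-masked copy of `Elements.__call__` visible earlier in this file content: triangle elements are numbered first, quads continue the numbering, and both element IDs and node IDs are 1-based. The helper name `element_node_map` and the tiny tria/quad arrays are invented for illustration.

import numpy as np

def element_node_map(tria3_index, quad4_index):
    # 1-based element IDs: trias first, quads continue the numbering;
    # 0-based node indices are likewise shifted to 1-based node IDs.
    elements = {i + 1: index + 1 for i, index in enumerate(tria3_index)}
    elements.update({i + len(elements) + 1: index + 1
                     for i, index in enumerate(quad4_index)})
    return elements

# Two triangles and one quad, given as 0-based node indices
tria = np.array([[0, 1, 2], [1, 3, 2]])
quad = np.array([[2, 3, 5, 4]])
mapping = element_node_map(tria, quad)
# -> {1: array([1, 2, 3]), 2: array([2, 4, 3]), 3: array([3, 4, 6, 5])}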
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh # MASKED: __call__ function (lines 1330-1354) @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. 
if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. """ data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. 
""" # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. """ self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in 
ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. 
# sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = "spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. 
""" coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
    @lru_cache(maxsize=1)
    def __call__(self) -> Dict[int, npt.NDArray[int]]:
        """Creates a mapping between element IDs and associated node IDs.

        Parameters
        ----------

        Returns
        -------
        dict
            Mapping between element IDs and associated node IDs

        Notes
        -----
        The result of this method is cached, so that multiple calls
        to it won't result in multiple calculations. If the mesh is
        modified and the cache is not properly cleared, the calls to
        this method can result in invalid return values.
        """

        elements = {i+1: index+1 for i, index
                    in enumerate(self.mesh.msh_t.tria3['index'])}
        elements.update({i+len(elements)+1: index+1 for i, index
                         in enumerate(self.mesh.msh_t.quad4['index'])})
        return elements
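For reference, a short usage sketch of the mapping built by this implementation (assuming `mesh` is an `EuclideanMesh2D` with at least one triangular element; element and node IDs are 1-based while the underlying arrays are 0-indexed):

elements = mesh.elements()                # {1: array of node IDs, 2: ..., ...}
first_elem_node_ids = elements[1]         # 1-based node IDs of element 1
first_elem_node_idxs = [mesh.nodes.get_index_by_id(nid)
                        for nid in first_elem_node_ids]
first_elem_coords = mesh.coord[first_elem_node_idxs]   # coordinates of its nodes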
1330
1354
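The start_line/end_line pair above is read here as the 1-based, inclusive line span that the masked `__call__` occupies inside the file content; a minimal splice sketch under that assumption (the function and variable names below are illustrative only):

def splice_masked_region(file_content: str, implementation: str,
                         start_line: int = 1330, end_line: int = 1354) -> str:
    # Replace lines start_line..end_line (1-based, inclusive) of file_content
    # with the implementation text to recover the full source file.
    lines = file_content.splitlines()
    return "\n".join(lines[:start_line - 1]
                     + implementation.splitlines()
                     + lines[end_line:])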
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
__call__
Retrieve the dataframe for all boundary information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing shape and type information for all boundaries. Notes ----- The result of this method is cached, so multiple calls to it won't result in repeated calculations. If the mesh is modified and the cache is not properly cleared, calls to this method can return invalid values.
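The cache-invalidation caveat in the Notes applies to every `lru_cache(maxsize=1)`-decorated method in this module. A small standalone illustration (a toy class, not ocsmesh code) of why a stale cache returns outdated results and why the module calls `cache_clear()` after mutating its data:

from functools import lru_cache

class Tally:
    """Toy stand-in for a cached helper such as Boundaries.__call__."""
    def __init__(self):
        self.items = [1, 2, 3]

    @lru_cache(maxsize=1)
    def total(self):
        # Stands in for the expensive GeoDataFrame construction.
        return sum(self.items)

t = Tally()
print(t.total())        # 6
t.items.append(4)       # "modify the mesh"
print(t.total())        # still 6 -- the cached result is stale
t.total.cache_clear()   # analogous to self.__call__.cache_clear()
print(t.total())        # 10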
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data # MASKED: __call__ function (lines 1697-1744) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], 
coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. 
# sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = "spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. 
""" coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
@lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs)
1697
1744
"""This module defines classes that handle mesh and mesh operations. This module defines a factory class for mesh, similar to geometry and size function factory class. It also defines concrete mesh types. Currently two concrete mesh types are defined for generic Eucledian mesh and specific 2D Eucledian mesh. """ from functools import lru_cache import logging from multiprocessing import Pool, cpu_count import os import pathlib from collections import defaultdict import warnings from typing import Union, List, Tuple, Dict, Any, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal import pandas as pd import geopandas as gpd from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk from matplotlib.path import Path from matplotlib.transforms import Bbox from matplotlib.tri import Triangulation from matplotlib.axes import Axes import matplotlib.pyplot as plt import numpy as np import numpy.typing as npt from pyproj import CRS, Transformer from scipy.interpolate import ( RectBivariateSpline, RegularGridInterpolator) from shapely.geometry import ( LineString, box, Polygon, MultiPolygon) from shapely.ops import polygonize, linemerge from ocsmesh import utils from ocsmesh.raster import Raster from ocsmesh.mesh.base import BaseMesh from ocsmesh.mesh.parsers import grd, sms2dm _logger = logging.getLogger(__name__) class EuclideanMesh(BaseMesh): """Generic Euclidean mesh class This is the base class for 2D or 3D Euclidean mesh. Attributes ---------- tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t] Reference to underlying jigsaw mesh's triangle element structure. triangles : npt.NDArray[np.float32] Array of node index for triangular elements. quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t] Reference to underlying jigsaw mesh's quadrangle element structure. quads : npt.NDArray[np.float32] Array of node index for quadrangular elements. crs : CRS Coodrinate reference system of the mesh object hull : Hull Handle to hull calculation helper object nodes : Nodes Handle to node handler helper object elements : Elements Handle to element handler helper object Methods ------- write(path, overwrite=False, format='grd') Export mesh object to the disk in the specified format. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ TypeError If input mesh is not of `jigsaw_msh_t` type. ValueError If input mesh's `mshID` is not equal to ``euclidean-mesh``. If input mesh has `crs` property which is not of `CRS` type. """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID != 'euclidean-mesh': raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, ' "but expected 'euclidean-mesh'.") if not hasattr(mesh, 'crs'): warnings.warn('Input mesh has no CRS information.') mesh.crs = None else: if not isinstance(mesh.crs, CRS): raise ValueError(f'crs property must be of type {CRS}, not ' f'type {type(mesh.crs)}.') self._hull = None self._nodes = None self._elements = None self._msh_t = mesh def write( self, path: Union[str, os.PathLike], overwrite: bool = False, format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622 ) -> None: """Export the mesh object to the disk Parameters ---------- path : path-like Path to which the mesh should be exported. 
overwrite : bool, default=False Whether to overwrite, if a file already exists in `path` format : { 'grd', '2dm', 'msh', 'vtk' } Format of the export, SMS-2DM or GRD. Returns ------- None Raises ------ ValueError If specified export format is **not** supported. """ path = pathlib.Path(path) if path.exists() and overwrite is not True: raise IOError( f'File {str(path)} exists and overwrite is not True.') if format == 'grd': grd_dict = utils.msh_t_to_grd(self.msh_t) if self._boundaries and self._boundaries.data: grd_dict.update(boundaries=self._boundaries.data) grd.write(grd_dict, path, overwrite) elif format == '2dm': sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite) elif format == 'msh': savemsh(str(path), self.msh_t) elif format == 'vtk': savevtk(str(path), self.msh_t) else: raise ValueError(f'Unhandled format {format}.') @property def tria3(self): """Reference to underlying mesh tirangle element structure""" return self.msh_t.tria3 @property def triangles(self): """Reference to underlying mesh triangle element index array""" return self.msh_t.tria3['index'] @property def quad4(self): """Reference to underlying mesh quadrangle element structure""" return self.msh_t.quad4 @property def quads(self): """Reference to underlying mesh quadrangle element index array""" return self.msh_t.quad4['index'] @property def crs(self): """Reference to underlying mesh crs""" return self.msh_t.crs @property def hull(self): """Reference to hull calculator helper object""" if self._hull is None: self._hull = Hull(self) return self._hull @property def nodes(self): """Reference to node handler helper object""" if self._nodes is None: self._nodes = Nodes(self) return self._nodes @property def elements(self): """Reference to element handler helper object""" if self._elements is None: self._elements = Elements(self) return self._elements class EuclideanMesh2D(EuclideanMesh): """2D Euclidean mesh definition Attributes ---------- boundaries vert2 value bbox Methods ------- get_bbox(crs=None, output_type=None) Gets the bounding box of the mesh elements. tricontourf(**kwargs) Create a contour plot from the value data on the nodes of the mesh interpolate(raster, method='spline', nprocs=None) Interpolate raster date on the nodes. get_contour(level) Get contour lines from node value data at specified levels. get_multipolygon(zmin=None, zmax=None) Get multipolygon of the mesh hull. """ def __init__(self, mesh: jigsaw_msh_t) -> None: """Initialize Euclidean 2D mesh object. Parameters ---------- mesh : jigsaw_msh_t The underlying jigsaw_msh_t object to hold onto mesh data. Raises ------ ValueError If number of mesh dimensions is not equal to ``2``. """ super().__init__(mesh) self._boundaries = None if mesh.ndims != +2: raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, ' "but expected ndims=2.") if len(self.msh_t.value) == 0: self.msh_t.value = np.array( np.full((self.vert2['coord'].shape[0], 1), np.nan)) def get_bbox( self, crs: Union[str, CRS, None] = None, output_type: Literal[None, 'polygon', 'bbox'] = None ) -> Union[Polygon, Bbox]: """Get the bounding box of mesh elements. Parameters ---------- crs : str or CRS or None, default=None CRS to transform the calculated bounding box into before returning output_type : { None, 'polygon', 'bbox'}, default=None Output type Returns ------- Polygon or Bbox Bounding box of the mesh elements. 
""" output_type = 'polygon' if output_type is None else output_type xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0]) ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1]) crs = self.crs if crs is None else crs if crs is not None: if not self.crs.equals(crs): transformer = Transformer.from_crs( self.crs, crs, always_xy=True) # pylint: disable=E0633 (xmin, xmax), (ymin, ymax) = transformer.transform( (xmin, xmax), (ymin, ymax)) if output_type == 'polygon': # pylint: disable=R1705 return box(xmin, ymin, xmax, ymax) elif output_type == 'bbox': return Bbox([[xmin, ymin], [xmax, ymax]]) raise TypeError( 'Argument output_type must a string literal \'polygon\' or ' '\'bbox\'') @property def boundaries(self): """Handle to boundaries calculator helper object""" if self._boundaries is None: self._boundaries = Boundaries(self) return self._boundaries def tricontourf(self, **kwargs) -> Axes: """Generate contour for the data of triangular elements of the mesh Parameters ---------- **kwargs : dict, optional Passed to underlying `matplotlib` API. Returns ------- Axes Axes on which the filled contour is drawn. """ return utils.tricontourf(self.msh_t, **kwargs) def interpolate( self, raster: Union[Raster, List[Raster]], method: Literal['spline', 'linear', 'nearest'] = 'spline', nprocs: Optional[int] = None, info_out_path: Union[pathlib.Path, str, None] = None, filter_by_shape: bool = False ) -> None: """Interplate values from raster inputs to the mesh nodes. Parameters ---------- raster : Raster or list of Raster A single or a list of rasters from which values are interpolated onto the mesh method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. nprocs : int or None, default=None Number of workers to use when interpolating data. info_out_path : pathlike or str or None Path for the output node interpolation information file filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- None """ if isinstance(raster, Raster): raster = [raster] nprocs = -1 if nprocs is None else nprocs nprocs = cpu_count() if nprocs == -1 else nprocs # Fix an issue on Jupyter notebook where having pool execute # interpolation even in case of nprocs == 1 would results in # application getting stuck if nprocs > 1: with Pool(processes=nprocs) as pool: res = pool.starmap( _mesh_interpolate_worker, [(self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] ) pool.join() else: res = [_mesh_interpolate_worker( self.vert2['coord'], self.crs, _raster.tmpfile, _raster.chunk_size, method, filter_by_shape) for _raster in raster] values = self.msh_t.value.flatten() interp_info_map = {} for (mask, _values), rast in zip(res, raster): values[mask] = _values if info_out_path is not None: vert_cs = None rast_crs = rast.crs if rast_crs.is_vertical: if rast_crs.sub_crs_list is not None: for sub_crs in rast_crs.sub_crs_list: if sub_crs.is_vertical: # TODO: What if sub CRS is compound, etc.? vert_cs = sub_crs elif rast_crs.source_crs is not None: if rast_crs.source_crs.is_vertical: # TODO: What if source CRS is compound, etc.? 
vert_cs = rast_crs.source_crs vert_cs_name = vert_cs.name idxs = np.argwhere(mask).ravel() interp_info_map.update({ idx: (rast.path, vert_cs_name) for idx in idxs}) if info_out_path is not None: coords = self.msh_t.vert2['coord'].copy() geo_coords = coords.copy() if not self.crs.is_geographic: transformer = Transformer.from_crs( self.crs, CRS.from_epsg(4326), always_xy=True) # pylint: disable=E0633 geo_coords[:, 0], geo_coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) vd_idxs=np.array(list(interp_info_map.keys())) df_interp_info = pd.DataFrame( index=vd_idxs, data={ 'x': coords[vd_idxs, 0], 'y': coords[vd_idxs, 1], 'lat': geo_coords[vd_idxs, 0], 'lon': geo_coords[vd_idxs, 1], 'elev': values[vd_idxs], 'crs': [i[1] for i in interp_info_map.values()], 'source': [i[0] for i in interp_info_map.values()] } ) df_interp_info.sort_index().to_csv( info_out_path, header=False, index=True) self.msh_t.value = np.array(values.reshape((values.shape[0], 1)), dtype=jigsaw_msh_t.REALS_t) def get_contour(self, level: float) -> LineString: """Extract contour lines at the specified `level` from mesh values Parameters ---------- level : float The level at which contour lines must be extracted. Returns ------- LineString Extracted and merged contour lines. Raises ------ ValueError If mesh has nodes that have null value `np.nan`. """ # ONLY SUPPORTS TRIANGLES for attr in ['quad4', 'hexa8']: if len(getattr(self.msh_t, attr)) > 0: warnings.warn( 'Mesh contour extraction only supports triangles') coords = self.msh_t.vert2['coord'] values = self.msh_t.value trias = self.msh_t.tria3['index'] if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. Raster values must" "be interpolated to the mesh before generating " "boundaries.") x, y = coords[:, 0], coords[:, 1] features = [] with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) _logger.debug('Computing contours...') fig, ax = plt.subplots() ax.tricontour( x, y, trias, values.ravel(), levels=[level]) plt.close(fig) for path_collection in ax.collections: for path in path_collection.get_paths(): try: features.append(LineString(path.vertices)) except ValueError: # LineStrings must have at least 2 coordinate tuples pass return linemerge(features) def get_multipolygon( self, zmin: Optional[float] = None, zmax: Optional[float] = None ) -> MultiPolygon: """Calculate multipolygon covering mesh elements (hull) Parameters ---------- zmin : float or None Minimum elevation to consider for multipolygon extraction zmax : float or None Maximum elevation to consider for multipolygon extraction Returns ------- MultiPolygon Calculated multipolygon shape """ values = self.msh_t.value mask = np.ones(values.shape) if zmin is not None: mask = np.logical_and(mask, values > zmin) if zmax is not None: mask = np.logical_and(mask, values < zmax) # Assuming value is of shape (N, 1) # ravel to make sure it's 1D verts_in = np.argwhere(mask).ravel() clipped_mesh = utils.clip_mesh_by_vertex( self.msh_t, verts_in, can_use_other_verts=True) boundary_edges = utils.get_boundary_edges(clipped_mesh) coords = clipped_mesh.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} poly_gen = polygonize(coords[boundary_edges]) polys = list(poly_gen) polys = sorted(polys, key=lambda p: p.area, reverse=True) rings = [p.exterior for p in polys] n_parents = np.zeros((len(rings),)) represent = np.array([r.coords[0] for r in rings]) for e, ring in enumerate(rings[:-1]): path = Path(ring.coords, closed=True) n_parents = n_parents + 
np.pad( np.array([ path.contains_point(pt) for pt in represent[e+1:]]), (e+1, 0), 'constant', constant_values=0) # Get actual polygons based on logic described above polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2] return MultiPolygon(polys) @property def vert2(self): """Reference to underlying mesh 2D vertices structure""" return self.msh_t.vert2 @property def value(self): """Reference to underlying mesh values""" return self.msh_t.value @property def bbox(self): """Calculates and returns bounding box of the mesh hull. See Also -------- get_bbox """ return self.get_bbox() MeshType = Union[EuclideanMesh2D] class Mesh(BaseMesh): """Mesh object factory Factory class that creates and returns concrete mesh object based on the input types. Methods ------- open(path, crs=None) Read mesh data from a file on disk. """ def __new__(cls, mesh: jigsaw_msh_t) -> MeshType: """Construct a concrete mesh object. Parameters ---------- mesh : jigsaw_msh_t Input jigsaw mesh object Returns ------- MeshType Mesh object created from the input Raises ------ TypeError Input `mesh` is not a `jigsaw_msh_t` object. NotImplementedError Input `mesh` object cannot be used to create a EuclideanMesh2D """ if not isinstance(mesh, jigsaw_msh_t): raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, ' f'not type {type(mesh)}.') if mesh.mshID == 'euclidean-mesh': if mesh.ndims == 2: return EuclideanMesh2D(mesh) raise NotImplementedError( f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not ' 'handled.') raise NotImplementedError(f'mshID={mesh.mshID} not handled.') @staticmethod def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType: """Read mesh from a file on disk Parameters ---------- path : path-like Path to the file containig mesh. crs : CRS or None, default=None CRS of the mesh in the path. Overwrites any info read from file, no transformation is done. Returns ------- MeshType Mesh object created by reading the file. Raises ------ TypeError If cannot determine the input mesh type. Notes ----- Currently only SMS-2DM and GRD formats are supported for reading. """ try: msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs)) msh_t.value = np.negative(msh_t.value) return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 if 'not a valid grd file' in str(e): pass else: raise e try: return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs))) except ValueError: pass try: msh_t = jigsaw_msh_t() loadmsh(msh_t, path) msh_t.crs = crs return Mesh(msh_t) except Exception as e: #pylint: disable=W0703 pass raise TypeError( f'Unable to automatically determine file type for {str(path)}.') class Rings: """Helper class for handling mesh rings. This is a helper class to manage the calculation of internal and external rings of the mesh polygon or hull. Attributes ---------- Methods ------- __call__() Returns all rings of the mesh hull interior() Return the interior rings of the mesh hull exterior() Return the exterior rings of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the ring calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates rings. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calcluates all the polygons of the mesh and extracts its rings. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all rings of the mesh hull polygon. The rings are in the form of `shapely.geometry.LinearRing`. 
Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ polys = utils.get_mesh_polygons(self.mesh.msh_t) data = [] bnd_id = 0 for poly in polys: data.append({ "geometry": poly.exterior, "bnd_id": bnd_id, "type": 'exterior' }) for interior in poly.interiors: data.append({ "geometry": interior, "bnd_id": bnd_id, "type": 'interior' }) bnd_id = bnd_id + 1 return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Extracts the exterior ring from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior ring of the mesh hull polygon. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Extracts the interior rings from the results of `__call__`. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior rings of the mesh hull polygon. """ return self().loc[self()['type'] == 'interior'] class Edges: """Helper class for handling mesh boundary edges. Attributes ---------- Methods ------- __call__() Return all boundary edges of the mesh hull interior() Return the interior boundary edges of the mesh hull exterior() Return the exterior boundary edges of the mesh hull """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes the edge calculator object for the input `mesh` Parameters ---------- mesh : EuclideanMesh Input mesh for which boundary edges are calculated. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all boundary edges for the mesh. Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all boundary edges of the mesh in the form of `shapely.geometry.LineString` for each coordinate couple. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for ring in self.mesh.hull.rings().itertuples(): coords = ring.geometry.coords for i in range(1, len(coords)): data.append({ "geometry": LineString([coords[i-1], coords[i]]), "bnd_id": ring.bnd_id, "type": ring.type}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Retruns exterior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing exterior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'exterior'] def interior(self) -> gpd.GeoDataFrame: """Retruns interior boundary edges from the results of `__call__` Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing interior boundary edges of the mesh in the form of line string couples. """ return self().loc[self()['type'] == 'interior'] class Hull: """Helper class for handling mesh hull calculations. This class wraps the functionality of ring and edge classes and adds additional methods to calculate or extract the polygon or triangulation of the mesh Attributes ---------- Methods ------- __call__() Calculates all the polys from all mesh rings exterior() Calculates the exterior rings of the mesh hull. interior() Calculates the interior rings of the mesh hull. 
implode() Calculates all the polygons (including isolated domain islands) in the mesh and returns a table of polygons. multipolygon() Calculates all the polygons (including isolated domain islands) in the mesh and returns a multipolygon. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize helper class for handling mesh hull calculations Parameters ---------- mesh : EuclideanMesh Input mesh for which hull calculations are done. Notes ----- This object holds onto the ring and edge calculator objects as well as a reference to the input mesh. """ self.mesh = mesh self.rings = Rings(mesh) self.edges = Edges(mesh) @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Calculates all polygons of the mesh including domain islands Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing all polygons of the mesh. See Also -------- implode() Dataframe with a single combined multipolygon. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ data = [] for bnd_id in np.unique(self.rings()['bnd_id'].tolist()): exterior = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'exterior')] interiors = self.rings().loc[ (self.rings()['bnd_id'] == bnd_id) & (self.rings()['type'] == 'interior')] data.append({ "geometry": Polygon( exterior.iloc[0].geometry.coords, [row.geometry.coords for _, row in interiors.iterrows()]), "bnd_id": bnd_id }) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def exterior(self) -> gpd.GeoDataFrame: """Creates polygons from exterior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from exterior rings of the mesh hull """ data = [] for exterior in self.rings().loc[ self.rings()['type'] == 'exterior'].itertuples(): data.append({"geometry": Polygon(exterior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def interior(self) -> gpd.GeoDataFrame: """Creates polygons from interior rings of the mesh hull Parameters ---------- Returns ------- gpd.GeoDataFrame Polygons created from interior rings of the mesh hull """ data = [] for interior in self.rings().loc[ self.rings()['type'] == 'interior'].itertuples(): data.append({"geometry": Polygon(interior.geometry.coords)}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def implode(self) -> gpd.GeoDataFrame: """Creates a dataframe from mesh polygons. Parameters ---------- Returns ------ gpd.GeoDataFrame Dataframe containing polygons of the mesh. See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. multipolygon() `shapely` multipolygon shape of combined mesh polygons. Notes ----- The difference of the return value of this method and `__call__` is that the `implode` returns a dataframe with a single `MultiPolygon` where as `__call__` returns a dataframe with multiple `Polygon` entries with associated `bnd_id`. """ return gpd.GeoDataFrame( {"geometry": MultiPolygon([polygon.geometry for polygon in self().itertuples()])}, crs=self.mesh.crs) def multipolygon(self) -> MultiPolygon: """Returns mesh multi-polygons. Parameters ---------- Returns ------ MultiPolygon Combined shape of polygons of the mesh. 
See Also -------- __call__() Dataframe with multiple polygon and boundary ID entries of the mesh polygons. implode() Dataframe with a single combined multipolygon of the mesh polygons. Notes ----- The difference of the return value of this method and `implode` is that `multipolygon` returns a `MultiPolygon` object where as `implode` returns a dataframe warpping the multipolygon object. """ mp = self.implode().iloc[0].geometry if isinstance(mp, Polygon): mp = MultiPolygon([mp]) return mp def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.mesh.msh_t.tria3['index'].tolist() for quad in self.mesh.msh_t.quad4['index']: triangles.extend([ [quad[0], quad[1], quad[3]], [quad[1], quad[2], quad[3]] ]) return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) class Nodes: """Helper class for handling mesh nodes. Attributes ---------- id_to_index : dict Mapping to convert node IDs to node indexes. index_to_id : dict Mapping to convert node indexes to node IDs. Methods ------- __call__() Creates a mapping between node IDs (index + 1) and node coordinates id() Returns list of node IDs. index() Return array of node indices. coords() Return mesh coordinates. values() Return values stored for mesh nodes. get_index_by_id(node_id) Get the node index based on node ID. get_id_by_index(index) Get the node ID based on the node index. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initializes node handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles nodes info. """ self.mesh = mesh self._id_to_index = None self._index_to_id = None @lru_cache(maxsize=1) def __call__(self) -> Dict[int, int]: """Creates a mapping between node IDs and indexes. Parameters ---------- Returns ------- dict Mapping between node IDs and indexes. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return {i+1: coord for i, coord in enumerate(self.coords())} def id(self) -> List[int]: """Retrives a list of element IDs. Parameters ---------- Returns ------- list of int List of node IDs as created by `__call__` """ return list(self().keys()) def index(self) -> npt.NDArray[int]: """Retrives an array of element indexes. Parameters ---------- Returns ------- array-like Array of node indexes. """ return np.arange(len(self())) def coords(self) -> npt.NDArray[np.float32]: """Retrieve the coordinates of mesh nodes Parameters ---------- Returns ------- array-like Coordinates of the mesh nodes as returned by `BaseMesh.coord` """ return self.mesh.coord def values(self): """Retrieve the values stored for mesh nodes Parameters ---------- Returns ------- array-like Values on the mesh nodes as returned by `BaseMesh.values` """ return self.mesh.values def get_index_by_id(self, node_id): """Converts mesh ID to mesh index. Parameters ---------- node_id : int ID of the node of interest Returns ------- int Index of the node of interest """ return self.id_to_index[node_id] def get_id_by_index(self, index: int): """Converts mesh index to mesh ID. Parameters ---------- index : int Index of the node of interest. 
Returns ------- int ID of the node of interest """ return self.index_to_id[index] @property def id_to_index(self) -> Dict[int, int]: """Read-only property returning the mapping of ID to index Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._id_to_index is None: self._id_to_index = {node_id: index for index, node_id in enumerate(self().keys())} return self._id_to_index @property def index_to_id(self) -> Dict[int, int]: """Read-only property returning the mapping of index to ID Notes ----- Although the property is read-only, the return value object is a cached mutable dictionary object. Modifying the mesh without clearing the cache properly or mutating the returned object could result in undefined behavior """ if self._index_to_id is None: self._index_to_id = dict(enumerate(self().keys())) return self._index_to_id # def get_indexes_around_index(self, index): # indexes_around_index = self.__dict__.get('indexes_around_index') # if indexes_around_index is None: # def append(geom): # for simplex in geom: # for i, j in permutations(simplex, 2): # indexes_around_index[i].add(j) # indexes_around_index = defaultdict(set) # append(self.gr3.elements.triangles()) # append(self.gr3.elements.quads()) # self.__dict__['indexes_around_index'] = indexes_around_index # return list(indexes_around_index[index]) class Elements: """Helper class for handling mesh elements. Attributes ---------- Methods -------- __call__() Creates a mapping between element IDs and associated node IDs. id() Returns a list of element IDs. index() Returns an array of element indexes. array() Creates and returns a masked array of element node indices. triangles() Creates and returns a 2D array of triangular element node indices. quads() Creates and returns a 2D array of quadrangular element node indices. triangulation() Calcluates a triangulation from the triangles and quads of the mesh. geodataframe() Creates and returns a dataframe of with polygon entires for each element. """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize the element handler helper object. Parameters ---------- mesh : EuclideanMesh Input mesh for which this object handles elements info. """ self.mesh = mesh @lru_cache(maxsize=1) def __call__(self) -> Dict[int, npt.NDArray[int]]: """Creates a mapping between element IDs and associated node IDs. Parameters ---------- Returns ------- dict Mapping between element IDs and associated node Ids Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ elements = {i+1: index+1 for i, index in enumerate(self.mesh.msh_t.tria3['index'])} elements.update({i+len(elements)+1: index+1 for i, index in enumerate(self.mesh.msh_t.quad4['index'])}) return elements @lru_cache(maxsize=1) def id(self) -> List[int]: """Retrieves the list of element IDs as returned by `__call__` Parameters ---------- Returns ------- list of int List of element IDs. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. 
""" return list(self().keys()) @lru_cache(maxsize=1) def index(self) -> npt.NDArray[int]: """Retrieves an array of element indices Parameters ---------- Returns ------- npt.NDArray 1D array of element indices. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.arange(len(self())) def array(self) -> npt.NDArray[int]: """Retrieves a masked array of element node IDs. The return value is ``n x m`` where ``n`` is the number of elements and ``m`` is the maximum number of element nodes, e.g. if there are only trias, then it's 3, for trias and quads it is 4. Parameters ---------- Returns ------- npt.NDArray Masked array where elements with fewer associated nodes have trailing masked node columns in the array. """ rank = int(max(map(len, self().values()))) array = np.full((len(self()), rank), -1) for i, elem_nd_ids in enumerate(self().values()): row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids))) array[i, :len(row)] = row return np.ma.masked_equal(array, -1) @lru_cache(maxsize=1) def triangles(self) -> npt.NDArray[int]: """Retrieves an array of tria element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for triangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 3]) @lru_cache(maxsize=1) def quads(self): """Retrieves an array of quad element node indices Parameters ---------- Returns ------- npt.NDArray 2D array of element nodes for quadrangle nodes Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ return np.array( [list(map(self.mesh.nodes.get_index_by_id, element)) for element in self().values() if len(element) == 4]) def triangulation(self) -> Triangulation: """Create triangulation object from all the mesh elements. Parameters ---------- Returns ------- Triangulation The `matplotlib` triangulation object create from all the elements of the parent mesh. Notes ----- Currently only tria3 and quad4 elements are considered. """ triangles = self.triangles().tolist() for quad in self.quads(): # TODO: Not tested. triangles.append([quad[0], quad[1], quad[3]]) triangles.append([quad[1], quad[2], quad[3]]) return Triangulation( self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles) def geodataframe(self) -> gpd.GeoDataFrame: """Create polygons for each element and return in dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe created from entries of `Polygon` type for each element. 
""" data = [] for elem_id, elem_nd_ids in self().items(): data.append({ 'geometry': Polygon( self.mesh.coord[list( map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]), 'id': elem_id}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) class Boundaries: """Helper class for mesh boundary condition calculation Attributes ---------- data : dict Mapping for boundary information Methods ------- __call__() Retrieves a dataframe for all boundary shapes and type info. __len__() Gets the number of calculated boundary segments. ocean() Retrieves a dataframe containing shapes and type info of ocean boundaries land() Retrieves a dataframe containing shapes and type info of land boundaries interior() Retrieves a dataframe containing shapes and type info of island boundaries auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1) Automatically generate boundary information based on the input land indicator `threshold` """ def __init__(self, mesh: EuclideanMesh) -> None: """Initialize boundary helper object Parameters ---------- mesh : EuclideanMesh Input mesh for which this object calculates boundaries. """ # TODO: Add a way to manually initialize self.mesh = mesh self._ocean = gpd.GeoDataFrame() self._land = gpd.GeoDataFrame() self._interior = gpd.GeoDataFrame() self._data = defaultdict(defaultdict) @lru_cache(maxsize=1) def _init_dataframes(self) -> None: """Internal: Creates boundary dataframes based on boundary data Parameters ---------- Returns ------- None Notes ----- This method doesn't have any return value, but it is cached so that on re-execution it doesn't recalculate. """ boundaries = self._data ocean_boundaries = [] land_boundaries = [] interior_boundaries = [] if boundaries is not None: for ibtype, bnds in boundaries.items(): if ibtype is None: for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) ocean_boundaries.append({ 'id': bnd_id, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) elif str(ibtype).endswith('1'): for bnd_id, data in bnds.items(): indexes = list(map(self.mesh.nodes.get_index_by_id, data['indexes'])) interior_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) else: for bnd_id, data in bnds.items(): _indexes = np.array(data['indexes']) if _indexes.ndim > 1: # ndim > 1 implies we're dealing with an ADCIRC # mesh that includes boundary pairs, such as weir new_indexes = [] for i, line in enumerate(_indexes.T): if i % 2 != 0: new_indexes.extend(np.flip(line)) else: new_indexes.extend(line) _indexes = np.array(new_indexes).flatten() else: _indexes = _indexes.flatten() indexes = list(map(self.mesh.nodes.get_index_by_id, _indexes)) land_boundaries.append({ 'id': bnd_id, 'ibtype': ibtype, "index_id": data['indexes'], "indexes": indexes, 'geometry': LineString(self.mesh.coord[indexes]) }) self._ocean = gpd.GeoDataFrame(ocean_boundaries) self._land = gpd.GeoDataFrame(land_boundaries) self._interior = gpd.GeoDataFrame(interior_boundaries) def ocean(self) -> gpd.GeoDataFrame: """Retrieve the ocean boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of ocean open boundary. 
""" self._init_dataframes() return self._ocean def land(self): """Retrieve the land boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of land boundary. """ self._init_dataframes() return self._land def interior(self): """Retrieve the island boundary information dataframe Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing the geometry and information of island boundary. """ self._init_dataframes() return self._interior @property def data(self) -> Dict[Optional[int], Any]: """Read-only property referencing the boundary data dictionary""" return self._data @lru_cache(maxsize=1) def __call__(self) -> gpd.GeoDataFrame: """Retrieve the dataframe for all boundaries information Parameters ---------- Returns ------- gpd.GeoDataFrame Dataframe containing information for all boundaries shape and type. Notes ----- The result of this method is cached, so that multiple calls to it won't result in multiple calculations. If the mesh is modified and the cache is not properly clear the calls to this method can result in invalid return values. """ self._init_dataframes() data = [] for bnd in self.ocean().itertuples(): data.append({ 'id': bnd.id, 'ibtype': None, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.land().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) for bnd in self.interior().itertuples(): data.append({ 'id': bnd.id, 'ibtype': bnd.ibtype, "index_id": bnd.index_id, "indexes": bnd.indexes, 'geometry': bnd.geometry}) return gpd.GeoDataFrame(data, crs=self.mesh.crs) def __len__(self) -> int: """Returns the number of boundary segments""" return len(self()) def auto_generate( self, threshold: float = 0., land_ibtype: int = 0, interior_ibtype: int = 1, ): """Automatically detect boundaries based on elevation data. Parameters ---------- threshold : float, default=0 Threshold above which nodes are considered dry nodes for ocean vs land boundary detection land_ibtype : int, default=0 Value to assign to land boundary type interior_ibtype : int, default=1 Value to assign to island boundary type Returns ------- None Raises ------ ValueError If any of the values assigned to a mesh node is `np.nan`. Notes ----- An edge is considered dry if any of the attached nodes are dry (its elevation is larger than or equal to the `threshold`). """ values = self.mesh.value if np.any(np.isnan(values)): raise ValueError( "Mesh contains invalid values. 
Raster values must" "be interpolated to the mesh before generating " "boundaries.") coords = self.mesh.msh_t.vert2['coord'] coo_to_idx = { tuple(coo): idx for idx, coo in enumerate(coords)} polys = utils.get_mesh_polygons(self.mesh.msh_t) # TODO: Split using shapely to get bdry segments boundaries = defaultdict(defaultdict) bdry_type = dict get_id = self.mesh.nodes.get_id_by_index # generate exterior boundaries for poly in polys: ext_ring_coo = poly.exterior.coords ext_ring = np.array([ (coo_to_idx[ext_ring_coo[e]], coo_to_idx[ext_ring_coo[e + 1]]) for e, coo in enumerate(ext_ring_coo[:-1])]) # find boundary edges edge_tag = np.full(ext_ring.shape, 0) edge_tag[ np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1 edge_tag[ np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1 edge_tag[ np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1 edge_tag[ np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1 # sort boundary edges ocean_boundary = [] land_boundary = [] for i, (e0, e1) in enumerate(edge_tag): if np.any(np.asarray((e0, e1)) == 1): land_boundary.append(tuple(ext_ring[i, :])) elif np.any(np.asarray((e0, e1)) == -1): ocean_boundary.append(tuple(ext_ring[i, :])) # ocean_boundaries = utils.sort_edges(ocean_boundary) # land_boundaries = utils.sort_edges(land_boundary) ocean_boundaries = [] if len(ocean_boundary) != 0: #pylint: disable=not-an-iterable ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist()) ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs ocean_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in ocean_segs] land_boundaries = [] if len(land_boundary) != 0: #pylint: disable=not-an-iterable land_segs = linemerge(coords[np.array(land_boundary)].tolist()) land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs land_boundaries = [ [(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]]) for e, coo in enumerate(seg.coords[:-1])] for seg in land_segs] _bnd_id = len(boundaries[None]) for bnd in ocean_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[None][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # add land boundaries _bnd_id = len(boundaries[land_ibtype]) for bnd in land_boundaries: e0, e1 = [list(t) for t in zip(*bnd)] e0 = [get_id(vert) for vert in e0] data = e0 + [get_id(e1[-1])] boundaries[land_ibtype][_bnd_id] = bdry_type( indexes=data, properties={}) _bnd_id += 1 # generate interior boundaries _bnd_id = 0 interior_boundaries = defaultdict() for poly in polys: interiors = poly.interiors for interior in interiors: int_ring_coo = interior.coords int_ring = [ (coo_to_idx[int_ring_coo[e]], coo_to_idx[int_ring_coo[e + 1]]) for e, coo in enumerate(int_ring_coo[:-1])] # TODO: Do we still need these? 
e0, e1 = [list(t) for t in zip(*int_ring)] if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0: e0 = e0[::-1] e1 = e1[::-1] e0 = [get_id(vert) for vert in e0] e0.append(e0[0]) interior_boundaries[_bnd_id] = e0 _bnd_id += 1 for bnd_id, data in interior_boundaries.items(): boundaries[interior_ibtype][bnd_id] = bdry_type( indexes=data, properties={}) self._data = boundaries self._init_dataframes.cache_clear() self.__call__.cache_clear() self._init_dataframes() SortedRingType = Dict[int, Dict[Literal['exterior', 'interiors'], Union[npt.NDArray, List[npt.NDArray]]] ] def sort_rings( index_rings: List[List[Tuple[int, int]]], vertices: npt.NDArray[np.float32]) -> SortedRingType: """Sorts a list of index-rings. Takes a list of unsorted index rings and sorts them into "exterior" and "interior" components. Any doubly-nested rings are considered exterior rings. Parameters ---------- index_rings : List[List[Tuple[int, int]]] Unosorted list of list of mesh edges as specified by end node indexs of each edge. vertices : npt.NDArray[np.float32] 2D ``n x 2`` array of node coordinate couples. Returns ------- SortedRingType Dictionary of information aboout polygon boundaries extracted based on the input Notes ----- The return value is a mapping of ring index to dictionary containing exterior and interior linear ring information as numpy array This function is not currently used, instead a different faster approach is used for boundary and polygon calculation from elements. """ # TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can # probably be optimized using shapely. # sort index_rings into corresponding "polygons" areas = [] for index_ring in index_rings: e0, e1 = [list(t) for t in zip(*index_ring)] areas.append(float(Polygon(vertices[e0, :]).area)) # maximum area must be main mesh idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id = 0 _index_rings = {} _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) while len(index_rings) > 0: # find all internal rings potential_interiors = [] for i, index_ring in enumerate(index_rings): e0, e1 = [list(t) for t in zip(*index_ring)] if path.contains_point(vertices[e0[0], :]): potential_interiors.append(i) # filter out nested rings real_interiors = [] for i, p_interior in reversed( list(enumerate(potential_interiors))): _p_interior = index_rings[p_interior] check = [index_rings[k] for j, k in reversed(list(enumerate(potential_interiors))) if i != j] has_parent = False for _path in check: e0, e1 = [list(t) for t in zip(*_path)] _path = Path(vertices[e0 + [e0[0]], :], closed=True) if _path.contains_point(vertices[_p_interior[0][0], :]): has_parent = True if not has_parent: real_interiors.append(p_interior) # pop real rings from collection for i in reversed(sorted(real_interiors)): _index_rings[_id]['interiors'].append( np.asarray(index_rings.pop(i))) areas.pop(i) # if no internal rings found, initialize next polygon if len(index_rings) > 0: idx = areas.index(np.max(areas)) exterior = index_rings.pop(idx) areas.pop(idx) _id += 1 _index_rings[_id] = { 'exterior': np.asarray(exterior), 'interiors': [] } e0, e1 = [list(t) for t in zip(*exterior)] path = Path(vertices[e0 + [e0[0]], :], closed=True) return _index_rings def _mesh_interpolate_worker( coords: npt.NDArray[np.float32], coords_crs: CRS, raster_path: Union[str, Path], chunk_size: Optional[int], method: Literal['spline', 'linear', 'nearest'] = 
"spline", filter_by_shape: bool = False): """Interpolator worker function to be used in parallel calls Parameters ---------- coords : npt.NDArray[np.float32] Mesh node coordinates. coords_crs : CRS Coordinate reference system of the input mesh coordinates. raster_path : str or Path Path to the raster temporary working file. chunk_size : int or None Chunk size for windowing over the raster. method : {'spline', 'linear', 'nearest'}, default='spline' Method of interpolation. filter_by_shape : bool Flag for node filtering based on raster bbox or shape Returns ------- idxs : npt.NDArray[bool] Mask of the nodes whose values are updated by current interpolation values : npt.NDArray[np.float32] Interpolated values. Raises ------ ValueError If specified interpolation `method` is not supported. """ coords = np.array(coords) raster = Raster(raster_path) idxs = [] values = [] for window in raster.iter_windows(chunk_size=chunk_size, overlap=2): if not raster.crs.equals(coords_crs): transformer = Transformer.from_crs( coords_crs, raster.crs, always_xy=True) # pylint: disable=E0633 coords[:, 0], coords[:, 1] = transformer.transform( coords[:, 0], coords[:, 1]) xi = raster.get_x(window) yi = raster.get_y(window) # Use masked array to ignore missing values from DEM zi = raster.get_values(window=window, masked=True) if not filter_by_shape: _idxs = np.logical_and( np.logical_and( np.min(xi) <= coords[:, 0], np.max(xi) >= coords[:, 0]), np.logical_and( np.min(yi) <= coords[:, 1], np.max(yi) >= coords[:, 1])) else: shape = raster.get_multipolygon() gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1]) _idxs = gs_pt.intersects(shape) interp_mask = None if method == 'spline': f = RectBivariateSpline( xi, np.flip(yi), np.flipud(zi).T, kx=3, ky=3, s=0, # bbox=[min(x), max(x), min(y), max(y)] # ?? ) _values = f.ev(coords[_idxs, 0], coords[_idxs, 1]) elif method in ['nearest', 'linear']: # Inspired by StackOverflow 35807321 if np.any(zi.mask): m_interp = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi.mask).T.astype(bool), method=method ) # Pick nodes NOT "contaminated" by masked values interp_mask = m_interp(coords[_idxs]) > 0 f = RegularGridInterpolator( (xi, np.flip(yi)), np.flipud(zi).T, method=method ) _values = f(coords[_idxs]) else: raise ValueError( f"Invalid value method specified <{method}>!") if interp_mask is not None: # pylint: disable=invalid-unary-operand-type helper = np.ones_like(_values).astype(bool) helper[interp_mask] = False # _idxs is inverse mask _idxs[_idxs] = helper _values = _values[~interp_mask] idxs.append(_idxs) values.append(_values) return (np.hstack(idxs), np.hstack(values))
group_policies_gen
Filter policies using the following steps: 1. Apply prioritization among the policies that share the same policy type and resource type 2. Remove redundant policies that may be applicable across different types of resource 3. Filter policies based on type and return :param flat_policies: list of flat policies :return: Filtered policies
# ------------------------------------------------------------------------- # Copyright (c) 2015-2017 AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------- # import copy import json from collections import defaultdict import itertools from osdf.utils.programming_utils import dot_notation, list_flatten # MASKED: group_policies_gen function (lines 26-55) def policy_name_as_regex(policy_name): """Get the correct policy name as a regex (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*) :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.* """ p = policy_name.partition('.') return p[0] + p[1] + ".*" + p[2] + ".*" def retrieve_node(req_json, reference): """ Get the child node(s) from the dot-notation [reference] and parent [req_json]. For placement and other requests, there are encoded JSONs inside the request or policy, so we need to expand it and then do a search over the parent plus expanded JSON. """ req_json_copy = copy.deepcopy(req_json) info = dot_notation(req_json_copy, reference) return list_flatten(info) if isinstance(info, list) else info
def group_policies_gen(flat_policies, config): """Filter policies using the following steps: 1. Apply prioritization among the policies that are sharing the same policy type and resource type 2. Remove redundant policies that may applicable across different types of resource 3. Filter policies based on type and return :param flat_policies: list of flat policies :return: Filtered policies """ filtered_policies = defaultdict(list) policy_name = [] policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]] # drop ones without 'type' priority = config.get('policy_info', {}).get('prioritization_attributes', {}) aggregated_policies = dict() for plc in policies: attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]] attrs_list = [x if isinstance(x, list) else [x] for x in attrs] attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list] for y in itertools.product(*attributes): aggregated_policies.setdefault(y, []) aggregated_policies[y].append(plc) for key in aggregated_policies.keys(): #aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True) prioritized_policy = aggregated_policies[key][0] if list(prioritized_policy.keys())[0] not in policy_name: # TODO: Check logic here... should policy appear only once across all groups? filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy) policy_name.append(list(prioritized_policy.keys())[0]) return filtered_policies
26
55
# ------------------------------------------------------------------------- # Copyright (c) 2015-2017 AT&T Intellectual Property # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------- # import copy import json from collections import defaultdict import itertools from osdf.utils.programming_utils import dot_notation, list_flatten def group_policies_gen(flat_policies, config): """Filter policies using the following steps: 1. Apply prioritization among the policies that are sharing the same policy type and resource type 2. Remove redundant policies that may applicable across different types of resource 3. Filter policies based on type and return :param flat_policies: list of flat policies :return: Filtered policies """ filtered_policies = defaultdict(list) policy_name = [] policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]] # drop ones without 'type' priority = config.get('policy_info', {}).get('prioritization_attributes', {}) aggregated_policies = dict() for plc in policies: attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]] attrs_list = [x if isinstance(x, list) else [x] for x in attrs] attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list] for y in itertools.product(*attributes): aggregated_policies.setdefault(y, []) aggregated_policies[y].append(plc) for key in aggregated_policies.keys(): #aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True) prioritized_policy = aggregated_policies[key][0] if list(prioritized_policy.keys())[0] not in policy_name: # TODO: Check logic here... should policy appear only once across all groups? filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy) policy_name.append(list(prioritized_policy.keys())[0]) return filtered_policies def policy_name_as_regex(policy_name): """Get the correct policy name as a regex (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*) :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.* """ p = policy_name.partition('.') return p[0] + p[1] + ".*" + p[2] + ".*" def retrieve_node(req_json, reference): """ Get the child node(s) from the dot-notation [reference] and parent [req_json]. For placement and other requests, there are encoded JSONs inside the request or policy, so we need to expand it and then do a search over the parent plus expanded JSON. """ req_json_copy = copy.deepcopy(req_json) info = dot_notation(req_json_copy, reference) return list_flatten(info) if isinstance(info, list) else info
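As a rough illustration of how group_policies_gen buckets policies by type, the sketch below builds a toy config and two flat policies. The policy names, the content.policyType attribute path, and the assumption that dot_notation resolves dotted keys against the policy body are invented for this example and are not taken from the OSDF sources; group_policies_gen itself is assumed importable from the module listed above.

# Hypothetical inputs for group_policies_gen; names and attribute paths are invented.
config = {
    "policy_info": {
        "prioritization_attributes": {
            # each dot-path is resolved against the policy body via dot_notation()
            "policy_type": ["content.policyType"],
        }
    }
}

flat_policies = [
    {"OOF_HAS_vCPE.distancePolicy": {
        "type": "distance_to_location",
        "content": {"policyType": "distance_to_location"}}},
    {"OOF_HAS_vCPE.hpaPolicy": {
        "type": "hpa",
        "content": {"policyType": "hpa"}}},
]

grouped = group_policies_gen(flat_policies, config)
# grouped is a defaultdict(list) keyed by each policy's "type" field, e.g.
# {"distance_to_location": [<distancePolicy>], "hpa": [<hpaPolicy>]}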
blackbody
The ratio of the blackbody function for dust at frequency nu over its value at the reference frequency ref_freq. Arguments --------- nu : float Frequency in GHz. ref_freq : float Reference frequency in GHz. Returns ------- blackbody_ratio : float B(nu, T_dust) / B(nu_ref, T_dust)
from __future__ import print_function from __future__ import absolute_import from __future__ import division import numpy as np __all__ = [ "wigner3j", "get_camb_cl", "scale_dust", ] # MASKED: blackbody function (lines 13-37) def rj2cmb(nu_in): """ Conversion from Rayleigh-Jeans units to CMB temperature units Arguments --------- nu_in : float Frequency in GHz. Returns ------- cal_fac : float Number by which to multiply a RJ temperature to get a CMB temp """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 2.72548 # Cmb BB temp in K nu = nu_in * 1.0e9 # GHz -> Hz x = h * nu / k / T return (np.exp(x) - 1.0) ** 2 / (x ** 2 * np.exp(x)) def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False): """ Get the factor by which you must multiply the cross spectrum from maps of frequencies freq0 and freq1 to match the dust power at ref_freq given spectra index beta. If deriv is True, return the frequency scaling at the reference beta, and the first derivative w.r.t. beta. Otherwise if delta_beta is given, return the scale factor adjusted for a linearized offset delta_beta from the reference beta. Arguments --------- freq0 : float Frequency of map0 in GHz. freq1 : float Frequency of map1 in GHz. ref_freq : float Reference frequency from which to compute relative scaling in GHz. beta : float Dust spectral index. delta_beta : float Difference from beta-- scaling computed as a first order Taylor expansion from original beta-scaling. deriv : bool If true, return the frequency scaling at the reference beta, along with the first derivative w.r.t. beta at the reference beta. Returns ------- freq_scale : float The relative scaling factor for the dust cross spectrum-- multiply by this number to get the dust spectrum at the reference frequency -- or -- freq_scale, deriv : floats The relative scaling factor and its derivative """ freq_scale = ( rj2cmb(freq0) * rj2cmb(freq1) / rj2cmb(ref_freq) ** 2.0 * blackbody(freq0, ref_freq=ref_freq) * blackbody(freq1, ref_freq=ref_freq) * (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0) ) if deriv or delta_beta is not None: delta = np.log(freq0 * freq1 / ref_freq ** 2) if deriv: return (freq_scale, freq_scale * delta) return freq_scale * (1 + delta * delta_beta) return freq_scale def wigner3j(l2, m2, l3, m3): r""" Wigner 3j symbols computed for all valid values of ``L``, as in: .. math:: \begin{pmatrix} \ell_2 & \ell_3 & L \\ m_2 & m_3 & 0 \\ \end{pmatrix} Arguments --------- l2, m2, l3, m3 : int The ell and m values for which to compute the symbols. Returns ------- fj : array_like Array of size ``l2 + l3 + 2``, indexed by ``L`` lmin : int The minimum value of ``L`` for which ``fj`` is non-zero. lmax : int The maximum value of ``L`` for which ``fj`` is non-zero. """ import camb try: from camb.mathutils import threej except ImportError: from camb.bispectrum import threej arr = threej(l2, l3, m2, m3) lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)]) lmax = l2 + l3 fj = np.zeros(lmax + 2, dtype=arr.dtype) fj[lmin : lmax + 1] = arr return fj, lmin, lmax def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True): """ Compute camb spectrum with tensors and lensing. Parameter values are from arXiv:1807.06209 Table 1 Plik best fit Arguments --------- r : float Tensor-to-scalar ratio lmax : int Maximum ell for which to compute spectra nt : scalar, optional Tensor spectral index. If not supplied, assumes slow-roll consistency relation. spec : string, optional Spectrum component to return. 
Can be 'total', 'unlensed_total', 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'. lfac: bool, optional If True, multiply Cls by ell*(ell+1)/2/pi Returns ------- cls : array_like Array of spectra of shape (lmax + 1, nspec). Diagonal ordering (TT, EE, BB, TE). """ # Set up a new set of parameters for CAMB import camb pars = camb.CAMBparams() # This function sets up CosmoMC-like settings, with one massive neutrino and # helium set using BBN consistency pars.set_cosmology( H0=67.32, ombh2=0.022383, omch2=0.12011, mnu=0.06, omk=0, tau=0.0543, ) ln1010As = 3.0448 pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt) if lmax < 2500: # This results in unacceptable bias. Use higher lmax, then cut it down lmax0 = 2500 else: lmax0 = lmax pars.set_for_lmax(lmax0, lens_potential_accuracy=2) pars.WantTensors = True pars.do_lensing = True # calculate results for these parameters results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac) totCL = powers[spec][: lmax + 1, :4].T return totCL
def blackbody(nu, ref_freq=353.0): """ The ratio of the blackbody function for dust at frequency nu over the value for reference frequency ref_freq Arguments --------- nu : float Frequency in GHz. ref_freq : float Reference frequency in GHz. Returns ------- blackbody_ratio : float B(nu, T_dust) / B(nu_ref, T_dust) """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 19.6 nu_ref = ref_freq * 1.0e9 nu *= 1.0e9 # GHz -> Hz x = h * nu / k / T x_ref = h * nu_ref / k / T return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1)
13
37
from __future__ import print_function from __future__ import absolute_import from __future__ import division import numpy as np __all__ = [ "wigner3j", "get_camb_cl", "scale_dust", ] def blackbody(nu, ref_freq=353.0): """ The ratio of the blackbody function for dust at frequency nu over the value for reference frequency ref_freq Arguments --------- nu : float Frequency in GHz. ref_freq : float Reference frequency in GHz. Returns ------- blackbody_ratio : float B(nu, T_dust) / B(nu_ref, T_dust) """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 19.6 nu_ref = ref_freq * 1.0e9 nu *= 1.0e9 # GHz -> Hz x = h * nu / k / T x_ref = h * nu_ref / k / T return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1) def rj2cmb(nu_in): """ Conversion from Rayleigh-Jeans units to CMB temperature units Arguments --------- nu_in : float Frequency in GHz. Returns ------- cal_fac : float Number by which to multiply a RJ temperature to get a CMB temp """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 2.72548 # Cmb BB temp in K nu = nu_in * 1.0e9 # GHz -> Hz x = h * nu / k / T return (np.exp(x) - 1.0) ** 2 / (x ** 2 * np.exp(x)) def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False): """ Get the factor by which you must multiply the cross spectrum from maps of frequencies freq0 and freq1 to match the dust power at ref_freq given spectra index beta. If deriv is True, return the frequency scaling at the reference beta, and the first derivative w.r.t. beta. Otherwise if delta_beta is given, return the scale factor adjusted for a linearized offset delta_beta from the reference beta. Arguments --------- freq0 : float Frequency of map0 in GHz. freq1 : float Frequency of map1 in GHz. ref_freq : float Reference frequency from which to compute relative scaling in GHz. beta : float Dust spectral index. delta_beta : float Difference from beta-- scaling computed as a first order Taylor expansion from original beta-scaling. deriv : bool If true, return the frequency scaling at the reference beta, along with the first derivative w.r.t. beta at the reference beta. Returns ------- freq_scale : float The relative scaling factor for the dust cross spectrum-- multiply by this number to get the dust spectrum at the reference frequency -- or -- freq_scale, deriv : floats The relative scaling factor and its derivative """ freq_scale = ( rj2cmb(freq0) * rj2cmb(freq1) / rj2cmb(ref_freq) ** 2.0 * blackbody(freq0, ref_freq=ref_freq) * blackbody(freq1, ref_freq=ref_freq) * (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0) ) if deriv or delta_beta is not None: delta = np.log(freq0 * freq1 / ref_freq ** 2) if deriv: return (freq_scale, freq_scale * delta) return freq_scale * (1 + delta * delta_beta) return freq_scale def wigner3j(l2, m2, l3, m3): r""" Wigner 3j symbols computed for all valid values of ``L``, as in: .. math:: \begin{pmatrix} \ell_2 & \ell_3 & L \\ m_2 & m_3 & 0 \\ \end{pmatrix} Arguments --------- l2, m2, l3, m3 : int The ell and m values for which to compute the symbols. Returns ------- fj : array_like Array of size ``l2 + l3 + 2``, indexed by ``L`` lmin : int The minimum value of ``L`` for which ``fj`` is non-zero. lmax : int The maximum value of ``L`` for which ``fj`` is non-zero. 
""" import camb try: from camb.mathutils import threej except ImportError: from camb.bispectrum import threej arr = threej(l2, l3, m2, m3) lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)]) lmax = l2 + l3 fj = np.zeros(lmax + 2, dtype=arr.dtype) fj[lmin : lmax + 1] = arr return fj, lmin, lmax def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True): """ Compute camb spectrum with tensors and lensing. Parameter values are from arXiv:1807.06209 Table 1 Plik best fit Arguments --------- r : float Tensor-to-scalar ratio lmax : int Maximum ell for which to compute spectra nt : scalar, optional Tensor spectral index. If not supplied, assumes slow-roll consistency relation. spec : string, optional Spectrum component to return. Can be 'total', 'unlensed_total', 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'. lfac: bool, optional If True, multiply Cls by ell*(ell+1)/2/pi Returns ------- cls : array_like Array of spectra of shape (lmax + 1, nspec). Diagonal ordering (TT, EE, BB, TE). """ # Set up a new set of parameters for CAMB import camb pars = camb.CAMBparams() # This function sets up CosmoMC-like settings, with one massive neutrino and # helium set using BBN consistency pars.set_cosmology( H0=67.32, ombh2=0.022383, omch2=0.12011, mnu=0.06, omk=0, tau=0.0543, ) ln1010As = 3.0448 pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt) if lmax < 2500: # This results in unacceptable bias. Use higher lmax, then cut it down lmax0 = 2500 else: lmax0 = lmax pars.set_for_lmax(lmax0, lens_potential_accuracy=2) pars.WantTensors = True pars.do_lensing = True # calculate results for these parameters results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac) totCL = powers[spec][: lmax + 1, :4].T return totCL
wigner3j
Wigner 3j symbols computed for all valid values of ``L``, as in: .. math:: \begin{pmatrix} \ell_2 & \ell_3 & L \\ m_2 & m_3 & 0 \\ \end{pmatrix} Arguments --------- l2, m2, l3, m3 : int The ell and m values for which to compute the symbols. Returns ------- fj : array_like Array of size ``l2 + l3 + 2``, indexed by ``L`` lmin : int The minimum value of ``L`` for which ``fj`` is non-zero. lmax : int The maximum value of ``L`` for which ``fj`` is non-zero.
from __future__ import print_function from __future__ import absolute_import from __future__ import division import numpy as np __all__ = [ "wigner3j", "get_camb_cl", "scale_dust", ] def blackbody(nu, ref_freq=353.0): """ The ratio of the blackbody function for dust at frequency nu over the value for reference frequency ref_freq Arguments --------- nu : float Frequency in GHz. ref_freq : float Reference frequency in GHz. Returns ------- blackbody_ratio : float B(nu, T_dust) / B(nu_ref, T_dust) """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 19.6 nu_ref = ref_freq * 1.0e9 nu *= 1.0e9 # GHz -> Hz x = h * nu / k / T x_ref = h * nu_ref / k / T return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1) def rj2cmb(nu_in): """ Conversion from Rayleigh-Jeans units to CMB temperature units Arguments --------- nu_in : float Frequency in GHz. Returns ------- cal_fac : float Number by which to multiply a RJ temperature to get a CMB temp """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 2.72548 # Cmb BB temp in K nu = nu_in * 1.0e9 # GHz -> Hz x = h * nu / k / T return (np.exp(x) - 1.0) ** 2 / (x ** 2 * np.exp(x)) def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False): """ Get the factor by which you must multiply the cross spectrum from maps of frequencies freq0 and freq1 to match the dust power at ref_freq given spectra index beta. If deriv is True, return the frequency scaling at the reference beta, and the first derivative w.r.t. beta. Otherwise if delta_beta is given, return the scale factor adjusted for a linearized offset delta_beta from the reference beta. Arguments --------- freq0 : float Frequency of map0 in GHz. freq1 : float Frequency of map1 in GHz. ref_freq : float Reference frequency from which to compute relative scaling in GHz. beta : float Dust spectral index. delta_beta : float Difference from beta-- scaling computed as a first order Taylor expansion from original beta-scaling. deriv : bool If true, return the frequency scaling at the reference beta, along with the first derivative w.r.t. beta at the reference beta. Returns ------- freq_scale : float The relative scaling factor for the dust cross spectrum-- multiply by this number to get the dust spectrum at the reference frequency -- or -- freq_scale, deriv : floats The relative scaling factor and its derivative """ freq_scale = ( rj2cmb(freq0) * rj2cmb(freq1) / rj2cmb(ref_freq) ** 2.0 * blackbody(freq0, ref_freq=ref_freq) * blackbody(freq1, ref_freq=ref_freq) * (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0) ) if deriv or delta_beta is not None: delta = np.log(freq0 * freq1 / ref_freq ** 2) if deriv: return (freq_scale, freq_scale * delta) return freq_scale * (1 + delta * delta_beta) return freq_scale # MASKED: wigner3j function (lines 118-155) def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True): """ Compute camb spectrum with tensors and lensing. Parameter values are from arXiv:1807.06209 Table 1 Plik best fit Arguments --------- r : float Tensor-to-scalar ratio lmax : int Maximum ell for which to compute spectra nt : scalar, optional Tensor spectral index. If not supplied, assumes slow-roll consistency relation. spec : string, optional Spectrum component to return. Can be 'total', 'unlensed_total', 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'. lfac: bool, optional If True, multiply Cls by ell*(ell+1)/2/pi Returns ------- cls : array_like Array of spectra of shape (lmax + 1, nspec). 
Diagonal ordering (TT, EE, BB, TE). """ # Set up a new set of parameters for CAMB import camb pars = camb.CAMBparams() # This function sets up CosmoMC-like settings, with one massive neutrino and # helium set using BBN consistency pars.set_cosmology( H0=67.32, ombh2=0.022383, omch2=0.12011, mnu=0.06, omk=0, tau=0.0543, ) ln1010As = 3.0448 pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt) if lmax < 2500: # This results in unacceptable bias. Use higher lmax, then cut it down lmax0 = 2500 else: lmax0 = lmax pars.set_for_lmax(lmax0, lens_potential_accuracy=2) pars.WantTensors = True pars.do_lensing = True # calculate results for these parameters results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac) totCL = powers[spec][: lmax + 1, :4].T return totCL
def wigner3j(l2, m2, l3, m3): r""" Wigner 3j symbols computed for all valid values of ``L``, as in: .. math:: \begin{pmatrix} \ell_2 & \ell_3 & L \\ m_2 & m_3 & 0 \\ \end{pmatrix} Arguments --------- l2, m2, l3, m3 : int The ell and m values for which to compute the symbols. Returns ------- fj : array_like Array of size ``l2 + l3 + 2``, indexed by ``L`` lmin : int The minimum value of ``L`` for which ``fj`` is non-zero. lmax : int The maximum value of ``L`` for which ``fj`` is non-zero. """ import camb try: from camb.mathutils import threej except ImportError: from camb.bispectrum import threej arr = threej(l2, l3, m2, m3) lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)]) lmax = l2 + l3 fj = np.zeros(lmax + 2, dtype=arr.dtype) fj[lmin : lmax + 1] = arr return fj, lmin, lmax
118
155
from __future__ import print_function from __future__ import absolute_import from __future__ import division import numpy as np __all__ = [ "wigner3j", "get_camb_cl", "scale_dust", ] def blackbody(nu, ref_freq=353.0): """ The ratio of the blackbody function for dust at frequency nu over the value for reference frequency ref_freq Arguments --------- nu : float Frequency in GHz. ref_freq : float Reference frequency in GHz. Returns ------- blackbody_ratio : float B(nu, T_dust) / B(nu_ref, T_dust) """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 19.6 nu_ref = ref_freq * 1.0e9 nu *= 1.0e9 # GHz -> Hz x = h * nu / k / T x_ref = h * nu_ref / k / T return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1) def rj2cmb(nu_in): """ Conversion from Rayleigh-Jeans units to CMB temperature units Arguments --------- nu_in : float Frequency in GHz. Returns ------- cal_fac : float Number by which to multiply a RJ temperature to get a CMB temp """ k = 1.38064852e-23 # Boltzmann constant h = 6.626070040e-34 # Planck constant T = 2.72548 # Cmb BB temp in K nu = nu_in * 1.0e9 # GHz -> Hz x = h * nu / k / T return (np.exp(x) - 1.0) ** 2 / (x ** 2 * np.exp(x)) def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False): """ Get the factor by which you must multiply the cross spectrum from maps of frequencies freq0 and freq1 to match the dust power at ref_freq given spectra index beta. If deriv is True, return the frequency scaling at the reference beta, and the first derivative w.r.t. beta. Otherwise if delta_beta is given, return the scale factor adjusted for a linearized offset delta_beta from the reference beta. Arguments --------- freq0 : float Frequency of map0 in GHz. freq1 : float Frequency of map1 in GHz. ref_freq : float Reference frequency from which to compute relative scaling in GHz. beta : float Dust spectral index. delta_beta : float Difference from beta-- scaling computed as a first order Taylor expansion from original beta-scaling. deriv : bool If true, return the frequency scaling at the reference beta, along with the first derivative w.r.t. beta at the reference beta. Returns ------- freq_scale : float The relative scaling factor for the dust cross spectrum-- multiply by this number to get the dust spectrum at the reference frequency -- or -- freq_scale, deriv : floats The relative scaling factor and its derivative """ freq_scale = ( rj2cmb(freq0) * rj2cmb(freq1) / rj2cmb(ref_freq) ** 2.0 * blackbody(freq0, ref_freq=ref_freq) * blackbody(freq1, ref_freq=ref_freq) * (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0) ) if deriv or delta_beta is not None: delta = np.log(freq0 * freq1 / ref_freq ** 2) if deriv: return (freq_scale, freq_scale * delta) return freq_scale * (1 + delta * delta_beta) return freq_scale def wigner3j(l2, m2, l3, m3): r""" Wigner 3j symbols computed for all valid values of ``L``, as in: .. math:: \begin{pmatrix} \ell_2 & \ell_3 & L \\ m_2 & m_3 & 0 \\ \end{pmatrix} Arguments --------- l2, m2, l3, m3 : int The ell and m values for which to compute the symbols. Returns ------- fj : array_like Array of size ``l2 + l3 + 2``, indexed by ``L`` lmin : int The minimum value of ``L`` for which ``fj`` is non-zero. lmax : int The maximum value of ``L`` for which ``fj`` is non-zero. 
""" import camb try: from camb.mathutils import threej except ImportError: from camb.bispectrum import threej arr = threej(l2, l3, m2, m3) lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)]) lmax = l2 + l3 fj = np.zeros(lmax + 2, dtype=arr.dtype) fj[lmin : lmax + 1] = arr return fj, lmin, lmax def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True): """ Compute camb spectrum with tensors and lensing. Parameter values are from arXiv:1807.06209 Table 1 Plik best fit Arguments --------- r : float Tensor-to-scalar ratio lmax : int Maximum ell for which to compute spectra nt : scalar, optional Tensor spectral index. If not supplied, assumes slow-roll consistency relation. spec : string, optional Spectrum component to return. Can be 'total', 'unlensed_total', 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'. lfac: bool, optional If True, multiply Cls by ell*(ell+1)/2/pi Returns ------- cls : array_like Array of spectra of shape (lmax + 1, nspec). Diagonal ordering (TT, EE, BB, TE). """ # Set up a new set of parameters for CAMB import camb pars = camb.CAMBparams() # This function sets up CosmoMC-like settings, with one massive neutrino and # helium set using BBN consistency pars.set_cosmology( H0=67.32, ombh2=0.022383, omch2=0.12011, mnu=0.06, omk=0, tau=0.0543, ) ln1010As = 3.0448 pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt) if lmax < 2500: # This results in unacceptable bias. Use higher lmax, then cut it down lmax0 = 2500 else: lmax0 = lmax pars.set_for_lmax(lmax0, lens_potential_accuracy=2) pars.WantTensors = True pars.do_lensing = True # calculate results for these parameters results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac) totCL = powers[spec][: lmax + 1, :4].T return totCL
__init__
Initialize the event. Args: timestamp: The POSIX timestamp value. usage: A string containing the description string of the timestamp. identifier: The row identifier. full_name: A string containing the full name of the Skype account holder. display_name: A string containing the chosen display name of the account holder. email: A string containing the registered email address of the account holder. country: A string containing the chosen home country of the account holder.
# -*- coding: utf-8 -*- """This file contains a basic Skype SQLite parser.""" import logging from plaso.events import time_events from plaso.parsers import sqlite from plaso.parsers.sqlite_plugins import interface __author__ = 'Joaquin Moreno Garijo ([email protected])' class SkypeChatEvent(time_events.PosixTimeEvent): """Convenience class for a Skype event.""" DATA_TYPE = u'skype:event:chat' def __init__(self, row, to_account): """Build a Skype Event from a single row. Args: row: A row object (instance of sqlite3.Row) that contains the extracted data from a single row in the database. to_account: A string containing the accounts (excluding the author) of the conversation. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeChatEvent, self).__init__( row['timestamp'], u'Chat from Skype', self.DATA_TYPE) self.title = row['title'] self.text = row['body_xml'] self.from_account = u'{0:s} <{1:s}>'.format( row['from_displayname'], row['author']) self.to_account = to_account class SkypeAccountEvent(time_events.PosixTimeEvent): """Convenience class for account information.""" DATA_TYPE = u'skype:event:account' # MASKED: __init__ function (lines 46-70) class SkypeSMSEvent(time_events.PosixTimeEvent): """Convenience EventObject for SMS.""" DATA_TYPE = u'skype:event:sms' def __init__(self, row, dst_number): """Read the information related with the SMS. Args: row: row form the sql query. row['time_sms']: timestamp when the sms was send. row['dstnum_sms']: number which receives the sms. row['msg_sms']: text send to this sms. dst_number: phone number where the user send the sms. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeSMSEvent, self).__init__( row['time_sms'], u'SMS from Skype', self.DATA_TYPE) self.number = dst_number self.text = row['msg_sms'] class SkypeCallEvent(time_events.PosixTimeEvent): """Convenience EventObject for the calls.""" DATA_TYPE = u'skype:event:call' def __init__(self, timestamp, call_type, user_start_call, source, destination, video_conference): """Contains information if the call was cancelled, accepted or finished. Args: timestamp: the timestamp of the event. call_type: WAITING, STARTED, FINISHED. user_start_call: boolean, true indicates that the owner account started the call. source: the account which started the call. destination: the account which gets the call. video_conference: boolean, if is true it was a videoconference. """ super(SkypeCallEvent, self).__init__( timestamp, u'Call from Skype', self.DATA_TYPE) self.call_type = call_type self.user_start_call = user_start_call self.src_call = source self.dst_call = destination self.video_conference = video_conference class SkypeTransferFileEvent(time_events.PosixTimeEvent): """Evaluate the action of send a file.""" DATA_TYPE = u'skype:event:transferfile' def __init__(self, row, timestamp, action_type, source, destination): """Actions related with sending files. Args: row: filepath: path from the file. filename: name of the file. filesize: size of the file. timestamp: when the action happens. action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED. source: The account that sent the file. destination: The account that received the file. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
super(SkypeTransferFileEvent, self).__init__( timestamp, u'File transfer from Skype', self.DATA_TYPE) self.offset = row['id'] self.action_type = action_type self.source = source self.destination = destination self.transferred_filepath = row['filepath'] self.transferred_filename = row['filename'] try: self.transferred_filesize = int(row['filesize']) except ValueError: logging.debug(u'Unknown filesize {0:s}'.format( self.transferred_filename)) self.transferred_filesize = 0 class SkypePlugin(interface.SQLitePlugin): """SQLite plugin for Skype main.db SQlite database file.""" NAME = u'skype' DESCRIPTION = u'Parser for Skype SQLite database files.' # Queries for building cache. QUERY_DEST_FROM_TRANSFER = ( u'SELECT parent_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') QUERY_SOURCE_FROM_TRANSFER = ( u'SELECT pk_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') # Define the needed queries. QUERIES = [ ((u'SELECT c.id, c.participants, c.friendlyname AS title, ' u'm.author AS author, m.from_dispname AS from_displayname, ' u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m ' u'WHERE c.name = m.chatname'), u'ParseChat'), ((u'SELECT id, fullname, given_displayname, emails, ' u'country, profile_timestamp, authreq_timestamp, ' u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, ' u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'), ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, ' u'body AS msg_sms FROM SMSes'), u'ParseSMS'), ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, ' u'starttime, accepttime, finishtime, filepath, filename, filesize, ' u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'), ((u'SELECT c.id, cm.guid, c.is_incoming, ' u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, ' u'cm.start_timestamp AS accept_call, cm.call_duration ' u'FROM Calls c, CallMembers cm ' u'WHERE c.id = cm.call_db_id;'), u'ParseCall')] # The required tables. REQUIRED_TABLES = frozenset([ u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes', u'Transfers', u'CallMembers', u'Calls']) def ParseAccountInformation( self, parser_mediator, row, query=None, **unused_kwargs): """Parses the Accounts database. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
if row['profile_timestamp']: event_object = SkypeAccountEvent( row['profile_timestamp'], u'Profile Changed', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['authreq_timestamp']: event_object = SkypeAccountEvent( row['authreq_timestamp'], u'Authenticate Request', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastonline_timestamp']: event_object = SkypeAccountEvent( row['lastonline_timestamp'], u'Last Online', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['mood_timestamp']: event_object = SkypeAccountEvent( row['mood_timestamp'], u'Mood Event', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['sent_authrequest_time']: event_object = SkypeAccountEvent( row['sent_authrequest_time'], u'Auth Request Sent', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastused_timestamp']: event_object = SkypeAccountEvent( row['lastused_timestamp'], u'Last Used', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs): """Parses a chat message row. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". to_account = u'' accounts = [] participants = row['participants'].split(' ') for participant in participants: if participant != row['author']: accounts.append(participant) to_account = u', '.join(accounts) if not to_account: if row['dialog_partner']: to_account = row['dialog_partner'] else: to_account = u'Unknown User' event_object = SkypeChatEvent(row, to_account) parser_mediator.ProduceEvent(event_object, query=query) def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs): """Parse SMS. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". dst_number = row['dstnum_sms'].replace(u' ', u'') event_object = SkypeSMSEvent(row, dst_number) parser_mediator.ProduceEvent(event_object, query=query) def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs): """Parse the calls taking into accounts some rows. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
try: aux = row['guid'] if aux: aux_list = aux.split(u'-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = u'Unknown [no GUID]' dst_aux = u'Unknown [no GUID]' except IndexError: src_aux = u'Unknown [{0:s}]'.format(row['guid']) dst_aux = u'Unknown [{0:s}]'.format(row['guid']) if row['is_incoming'] == u'0': user_start_call = True source = src_aux if row['ip_address']: destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address']) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux if row['videostatus'] == u'3': video_conference = True else: video_conference = False event_object = SkypeCallEvent( row['try_call'], u'WAITING', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['accept_call']: event_object = SkypeCallEvent( row['accept_call'], u'ACCEPTED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['call_duration']: try: timestamp = int(row['accept_call']) + int(row['call_duration']) event_object = SkypeCallEvent( timestamp, u'FINISHED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) except ValueError: logging.debug(( u'[{0:s}] Unable to determine when the call {1:s} was ' u'finished.').format(self.NAME, row['id'])) def ParseFileTransfer( self, parser_mediator, row, cache=None, database=None, query=None, **unused_kwargs): """Parse the transfer files. There is no direct relationship between who sends the file and who accepts the file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: the row with all information related with the file transfers. query: Optional query string. The default is None. cache: a cache object (instance of SQLiteCache). database: A database object (instance of SQLiteDatabase). """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". source_dict = cache.GetResults(u'source') if not source_dict: cursor = database.cursor results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". cache.CacheQueryResults( results, 'source', 'pk_id', ('skypeid', 'skypename')) source_dict = cache.GetResults(u'source') dest_dict = cache.GetResults(u'destination') if not dest_dict: cursor = database.cursor results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
cache.CacheQueryResults( results, 'destination', 'parent_id', ('skypeid', 'skypename')) dest_dict = cache.GetResults(u'destination') source = u'Unknown' destination = u'Unknown' if row['parent_id']: destination = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) skype_id, skype_name = source_dict.get(row['parent_id'], [None, None]) if skype_name: source = u'{0:s} <{1:s}>'.format(skype_id, skype_name) else: source = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) if row['pk_id']: skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None]) if skype_name: destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name) if row['status'] == 8: if row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'GETSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['accepttime']: event_object = SkypeTransferFileEvent( row, row['accepttime'], u'ACCEPTED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['finishtime']: event_object = SkypeTransferFileEvent( row, row['finishtime'], u'FINISHED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) elif row['status'] == 2 and row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'SENDSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
def __init__( self, timestamp, usage, identifier, full_name, display_name, email, country): """Initialize the event. Args: timestamp: The POSIX timestamp value. usage: A string containing the description string of the timestamp. identifier: The row identifier. full_name: A string containing the full name of the Skype account holder. display_name: A string containing the chosen display name of the account holder. email: A string containing the registered email address of the account holder. country: A string containing the chosen home country of the account holder. """ super(SkypeAccountEvent, self).__init__(timestamp, usage) self.offset = identifier self.username = u'{0:s} <{1:s}>'.format(full_name, display_name) self.display_name = display_name self.email = email self.country = country self.data_type = self.DATA_TYPE
46
70
# -*- coding: utf-8 -*- """This file contains a basic Skype SQLite parser.""" import logging from plaso.events import time_events from plaso.parsers import sqlite from plaso.parsers.sqlite_plugins import interface __author__ = 'Joaquin Moreno Garijo ([email protected])' class SkypeChatEvent(time_events.PosixTimeEvent): """Convenience class for a Skype event.""" DATA_TYPE = u'skype:event:chat' def __init__(self, row, to_account): """Build a Skype Event from a single row. Args: row: A row object (instance of sqlite3.Row) that contains the extracted data from a single row in the database. to_account: A string containing the accounts (excluding the author) of the conversation. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeChatEvent, self).__init__( row['timestamp'], u'Chat from Skype', self.DATA_TYPE) self.title = row['title'] self.text = row['body_xml'] self.from_account = u'{0:s} <{1:s}>'.format( row['from_displayname'], row['author']) self.to_account = to_account class SkypeAccountEvent(time_events.PosixTimeEvent): """Convenience class for account information.""" DATA_TYPE = u'skype:event:account' def __init__( self, timestamp, usage, identifier, full_name, display_name, email, country): """Initialize the event. Args: timestamp: The POSIX timestamp value. usage: A string containing the description string of the timestamp. identifier: The row identifier. full_name: A string containing the full name of the Skype account holder. display_name: A string containing the chosen display name of the account holder. email: A string containing the registered email address of the account holder. country: A string containing the chosen home country of the account holder. """ super(SkypeAccountEvent, self).__init__(timestamp, usage) self.offset = identifier self.username = u'{0:s} <{1:s}>'.format(full_name, display_name) self.display_name = display_name self.email = email self.country = country self.data_type = self.DATA_TYPE class SkypeSMSEvent(time_events.PosixTimeEvent): """Convenience EventObject for SMS.""" DATA_TYPE = u'skype:event:sms' def __init__(self, row, dst_number): """Read the information related with the SMS. Args: row: row form the sql query. row['time_sms']: timestamp when the sms was send. row['dstnum_sms']: number which receives the sms. row['msg_sms']: text send to this sms. dst_number: phone number where the user send the sms. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeSMSEvent, self).__init__( row['time_sms'], u'SMS from Skype', self.DATA_TYPE) self.number = dst_number self.text = row['msg_sms'] class SkypeCallEvent(time_events.PosixTimeEvent): """Convenience EventObject for the calls.""" DATA_TYPE = u'skype:event:call' def __init__(self, timestamp, call_type, user_start_call, source, destination, video_conference): """Contains information if the call was cancelled, accepted or finished. Args: timestamp: the timestamp of the event. call_type: WAITING, STARTED, FINISHED. user_start_call: boolean, true indicates that the owner account started the call. source: the account which started the call. destination: the account which gets the call. video_conference: boolean, if is true it was a videoconference. 
""" super(SkypeCallEvent, self).__init__( timestamp, u'Call from Skype', self.DATA_TYPE) self.call_type = call_type self.user_start_call = user_start_call self.src_call = source self.dst_call = destination self.video_conference = video_conference class SkypeTransferFileEvent(time_events.PosixTimeEvent): """Evaluate the action of send a file.""" DATA_TYPE = u'skype:event:transferfile' def __init__(self, row, timestamp, action_type, source, destination): """Actions related with sending files. Args: row: filepath: path from the file. filename: name of the file. filesize: size of the file. timestamp: when the action happens. action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED. source: The account that sent the file. destination: The account that received the file. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeTransferFileEvent, self).__init__( timestamp, u'File transfer from Skype', self.DATA_TYPE) self.offset = row['id'] self.action_type = action_type self.source = source self.destination = destination self.transferred_filepath = row['filepath'] self.transferred_filename = row['filename'] try: self.transferred_filesize = int(row['filesize']) except ValueError: logging.debug(u'Unknown filesize {0:s}'.format( self.transferred_filename)) self.transferred_filesize = 0 class SkypePlugin(interface.SQLitePlugin): """SQLite plugin for Skype main.db SQlite database file.""" NAME = u'skype' DESCRIPTION = u'Parser for Skype SQLite database files.' # Queries for building cache. QUERY_DEST_FROM_TRANSFER = ( u'SELECT parent_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') QUERY_SOURCE_FROM_TRANSFER = ( u'SELECT pk_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') # Define the needed queries. QUERIES = [ ((u'SELECT c.id, c.participants, c.friendlyname AS title, ' u'm.author AS author, m.from_dispname AS from_displayname, ' u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m ' u'WHERE c.name = m.chatname'), u'ParseChat'), ((u'SELECT id, fullname, given_displayname, emails, ' u'country, profile_timestamp, authreq_timestamp, ' u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, ' u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'), ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, ' u'body AS msg_sms FROM SMSes'), u'ParseSMS'), ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, ' u'starttime, accepttime, finishtime, filepath, filename, filesize, ' u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'), ((u'SELECT c.id, cm.guid, c.is_incoming, ' u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, ' u'cm.start_timestamp AS accept_call, cm.call_duration ' u'FROM Calls c, CallMembers cm ' u'WHERE c.id = cm.call_db_id;'), u'ParseCall')] # The required tables. REQUIRED_TABLES = frozenset([ u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes', u'Transfers', u'CallMembers', u'Calls']) def ParseAccountInformation( self, parser_mediator, row, query=None, **unused_kwargs): """Parses the Accounts database. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
if row['profile_timestamp']: event_object = SkypeAccountEvent( row['profile_timestamp'], u'Profile Changed', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['authreq_timestamp']: event_object = SkypeAccountEvent( row['authreq_timestamp'], u'Authenticate Request', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastonline_timestamp']: event_object = SkypeAccountEvent( row['lastonline_timestamp'], u'Last Online', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['mood_timestamp']: event_object = SkypeAccountEvent( row['mood_timestamp'], u'Mood Event', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['sent_authrequest_time']: event_object = SkypeAccountEvent( row['sent_authrequest_time'], u'Auth Request Sent', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastused_timestamp']: event_object = SkypeAccountEvent( row['lastused_timestamp'], u'Last Used', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs): """Parses a chat message row. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". to_account = u'' accounts = [] participants = row['participants'].split(' ') for participant in participants: if participant != row['author']: accounts.append(participant) to_account = u', '.join(accounts) if not to_account: if row['dialog_partner']: to_account = row['dialog_partner'] else: to_account = u'Unknown User' event_object = SkypeChatEvent(row, to_account) parser_mediator.ProduceEvent(event_object, query=query) def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs): """Parse SMS. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". dst_number = row['dstnum_sms'].replace(u' ', u'') event_object = SkypeSMSEvent(row, dst_number) parser_mediator.ProduceEvent(event_object, query=query) def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs): """Parse the calls taking into accounts some rows. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
try: aux = row['guid'] if aux: aux_list = aux.split(u'-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = u'Unknown [no GUID]' dst_aux = u'Unknown [no GUID]' except IndexError: src_aux = u'Unknown [{0:s}]'.format(row['guid']) dst_aux = u'Unknown [{0:s}]'.format(row['guid']) if row['is_incoming'] == u'0': user_start_call = True source = src_aux if row['ip_address']: destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address']) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux if row['videostatus'] == u'3': video_conference = True else: video_conference = False event_object = SkypeCallEvent( row['try_call'], u'WAITING', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['accept_call']: event_object = SkypeCallEvent( row['accept_call'], u'ACCEPTED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['call_duration']: try: timestamp = int(row['accept_call']) + int(row['call_duration']) event_object = SkypeCallEvent( timestamp, u'FINISHED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) except ValueError: logging.debug(( u'[{0:s}] Unable to determine when the call {1:s} was ' u'finished.').format(self.NAME, row['id'])) def ParseFileTransfer( self, parser_mediator, row, cache=None, database=None, query=None, **unused_kwargs): """Parse the transfer files. There is no direct relationship between who sends the file and who accepts the file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: the row with all information related with the file transfers. query: Optional query string. The default is None. cache: a cache object (instance of SQLiteCache). database: A database object (instance of SQLiteDatabase). """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". source_dict = cache.GetResults(u'source') if not source_dict: cursor = database.cursor results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". cache.CacheQueryResults( results, 'source', 'pk_id', ('skypeid', 'skypename')) source_dict = cache.GetResults(u'source') dest_dict = cache.GetResults(u'destination') if not dest_dict: cursor = database.cursor results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
cache.CacheQueryResults( results, 'destination', 'parent_id', ('skypeid', 'skypename')) dest_dict = cache.GetResults(u'destination') source = u'Unknown' destination = u'Unknown' if row['parent_id']: destination = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) skype_id, skype_name = source_dict.get(row['parent_id'], [None, None]) if skype_name: source = u'{0:s} <{1:s}>'.format(skype_id, skype_name) else: source = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) if row['pk_id']: skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None]) if skype_name: destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name) if row['status'] == 8: if row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'GETSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['accepttime']: event_object = SkypeTransferFileEvent( row, row['accepttime'], u'ACCEPTED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['finishtime']: event_object = SkypeTransferFileEvent( row, row['finishtime'], u'FINISHED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) elif row['status'] == 2 and row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'SENDSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
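To illustrate how ParseAccountInformation fans a single Accounts row out into several timestamped events (one per non-empty timestamp column), here is a standalone sketch; the row dictionary and the print call are stand-ins for sqlite3.Row and plaso's event production.

# Illustrative stand-in data; not a plaso object.
row = {
    'id': 42,
    'fullname': 'Alice Example',
    'given_displayname': 'alice',
    'emails': 'alice@example.com',
    'country': 'us',
    'profile_timestamp': 1438185600,
    'authreq_timestamp': None,
    'lastonline_timestamp': 1438272000,
    'mood_timestamp': None,
    'sent_authrequest_time': None,
    'lastused_timestamp': 1438358400,
}

usages = [
    ('profile_timestamp', u'Profile Changed'),
    ('authreq_timestamp', u'Authenticate Request'),
    ('lastonline_timestamp', u'Last Online'),
    ('mood_timestamp', u'Mood Event'),
    ('sent_authrequest_time', u'Auth Request Sent'),
    ('lastused_timestamp', u'Last Used'),
]

for column, usage in usages:
    if row[column]:
        # Each non-empty timestamp column becomes its own SkypeAccountEvent.
        print(row[column], usage, u'{0:s} <{1:s}>'.format(
            row['fullname'], row['given_displayname']))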
ParseFileTransfer
Parse the file transfers. There is no direct relationship between who sends the file and who accepts the file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row with all the information related to the file transfers. query: Optional query string. The default is None. cache: A cache object (instance of SQLiteCache). database: A database object (instance of SQLiteDatabase).
# -*- coding: utf-8 -*- """This file contains a basic Skype SQLite parser.""" import logging from plaso.events import time_events from plaso.parsers import sqlite from plaso.parsers.sqlite_plugins import interface __author__ = 'Joaquin Moreno Garijo ([email protected])' class SkypeChatEvent(time_events.PosixTimeEvent): """Convenience class for a Skype event.""" DATA_TYPE = u'skype:event:chat' def __init__(self, row, to_account): """Build a Skype Event from a single row. Args: row: A row object (instance of sqlite3.Row) that contains the extracted data from a single row in the database. to_account: A string containing the accounts (excluding the author) of the conversation. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeChatEvent, self).__init__( row['timestamp'], u'Chat from Skype', self.DATA_TYPE) self.title = row['title'] self.text = row['body_xml'] self.from_account = u'{0:s} <{1:s}>'.format( row['from_displayname'], row['author']) self.to_account = to_account class SkypeAccountEvent(time_events.PosixTimeEvent): """Convenience class for account information.""" DATA_TYPE = u'skype:event:account' def __init__( self, timestamp, usage, identifier, full_name, display_name, email, country): """Initialize the event. Args: timestamp: The POSIX timestamp value. usage: A string containing the description string of the timestamp. identifier: The row identifier. full_name: A string containing the full name of the Skype account holder. display_name: A string containing the chosen display name of the account holder. email: A string containing the registered email address of the account holder. country: A string containing the chosen home country of the account holder. """ super(SkypeAccountEvent, self).__init__(timestamp, usage) self.offset = identifier self.username = u'{0:s} <{1:s}>'.format(full_name, display_name) self.display_name = display_name self.email = email self.country = country self.data_type = self.DATA_TYPE class SkypeSMSEvent(time_events.PosixTimeEvent): """Convenience EventObject for SMS.""" DATA_TYPE = u'skype:event:sms' def __init__(self, row, dst_number): """Read the information related with the SMS. Args: row: row form the sql query. row['time_sms']: timestamp when the sms was send. row['dstnum_sms']: number which receives the sms. row['msg_sms']: text send to this sms. dst_number: phone number where the user send the sms. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeSMSEvent, self).__init__( row['time_sms'], u'SMS from Skype', self.DATA_TYPE) self.number = dst_number self.text = row['msg_sms'] class SkypeCallEvent(time_events.PosixTimeEvent): """Convenience EventObject for the calls.""" DATA_TYPE = u'skype:event:call' def __init__(self, timestamp, call_type, user_start_call, source, destination, video_conference): """Contains information if the call was cancelled, accepted or finished. Args: timestamp: the timestamp of the event. call_type: WAITING, STARTED, FINISHED. user_start_call: boolean, true indicates that the owner account started the call. source: the account which started the call. destination: the account which gets the call. video_conference: boolean, if is true it was a videoconference. 
""" super(SkypeCallEvent, self).__init__( timestamp, u'Call from Skype', self.DATA_TYPE) self.call_type = call_type self.user_start_call = user_start_call self.src_call = source self.dst_call = destination self.video_conference = video_conference class SkypeTransferFileEvent(time_events.PosixTimeEvent): """Evaluate the action of send a file.""" DATA_TYPE = u'skype:event:transferfile' def __init__(self, row, timestamp, action_type, source, destination): """Actions related with sending files. Args: row: filepath: path from the file. filename: name of the file. filesize: size of the file. timestamp: when the action happens. action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED. source: The account that sent the file. destination: The account that received the file. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeTransferFileEvent, self).__init__( timestamp, u'File transfer from Skype', self.DATA_TYPE) self.offset = row['id'] self.action_type = action_type self.source = source self.destination = destination self.transferred_filepath = row['filepath'] self.transferred_filename = row['filename'] try: self.transferred_filesize = int(row['filesize']) except ValueError: logging.debug(u'Unknown filesize {0:s}'.format( self.transferred_filename)) self.transferred_filesize = 0 class SkypePlugin(interface.SQLitePlugin): """SQLite plugin for Skype main.db SQlite database file.""" NAME = u'skype' DESCRIPTION = u'Parser for Skype SQLite database files.' # Queries for building cache. QUERY_DEST_FROM_TRANSFER = ( u'SELECT parent_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') QUERY_SOURCE_FROM_TRANSFER = ( u'SELECT pk_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') # Define the needed queries. QUERIES = [ ((u'SELECT c.id, c.participants, c.friendlyname AS title, ' u'm.author AS author, m.from_dispname AS from_displayname, ' u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m ' u'WHERE c.name = m.chatname'), u'ParseChat'), ((u'SELECT id, fullname, given_displayname, emails, ' u'country, profile_timestamp, authreq_timestamp, ' u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, ' u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'), ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, ' u'body AS msg_sms FROM SMSes'), u'ParseSMS'), ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, ' u'starttime, accepttime, finishtime, filepath, filename, filesize, ' u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'), ((u'SELECT c.id, cm.guid, c.is_incoming, ' u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, ' u'cm.start_timestamp AS accept_call, cm.call_duration ' u'FROM Calls c, CallMembers cm ' u'WHERE c.id = cm.call_db_id;'), u'ParseCall')] # The required tables. REQUIRED_TABLES = frozenset([ u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes', u'Transfers', u'CallMembers', u'Calls']) def ParseAccountInformation( self, parser_mediator, row, query=None, **unused_kwargs): """Parses the Accounts database. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
if row['profile_timestamp']: event_object = SkypeAccountEvent( row['profile_timestamp'], u'Profile Changed', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['authreq_timestamp']: event_object = SkypeAccountEvent( row['authreq_timestamp'], u'Authenticate Request', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastonline_timestamp']: event_object = SkypeAccountEvent( row['lastonline_timestamp'], u'Last Online', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['mood_timestamp']: event_object = SkypeAccountEvent( row['mood_timestamp'], u'Mood Event', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['sent_authrequest_time']: event_object = SkypeAccountEvent( row['sent_authrequest_time'], u'Auth Request Sent', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastused_timestamp']: event_object = SkypeAccountEvent( row['lastused_timestamp'], u'Last Used', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs): """Parses a chat message row. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". to_account = u'' accounts = [] participants = row['participants'].split(' ') for participant in participants: if participant != row['author']: accounts.append(participant) to_account = u', '.join(accounts) if not to_account: if row['dialog_partner']: to_account = row['dialog_partner'] else: to_account = u'Unknown User' event_object = SkypeChatEvent(row, to_account) parser_mediator.ProduceEvent(event_object, query=query) def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs): """Parse SMS. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". dst_number = row['dstnum_sms'].replace(u' ', u'') event_object = SkypeSMSEvent(row, dst_number) parser_mediator.ProduceEvent(event_object, query=query) def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs): """Parse the calls taking into accounts some rows. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
try: aux = row['guid'] if aux: aux_list = aux.split(u'-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = u'Unknown [no GUID]' dst_aux = u'Unknown [no GUID]' except IndexError: src_aux = u'Unknown [{0:s}]'.format(row['guid']) dst_aux = u'Unknown [{0:s}]'.format(row['guid']) if row['is_incoming'] == u'0': user_start_call = True source = src_aux if row['ip_address']: destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address']) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux if row['videostatus'] == u'3': video_conference = True else: video_conference = False event_object = SkypeCallEvent( row['try_call'], u'WAITING', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['accept_call']: event_object = SkypeCallEvent( row['accept_call'], u'ACCEPTED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['call_duration']: try: timestamp = int(row['accept_call']) + int(row['call_duration']) event_object = SkypeCallEvent( timestamp, u'FINISHED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) except ValueError: logging.debug(( u'[{0:s}] Unable to determine when the call {1:s} was ' u'finished.').format(self.NAME, row['id'])) # MASKED: ParseFileTransfer function (lines 369-446) sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
def ParseFileTransfer( self, parser_mediator, row, cache=None, database=None, query=None, **unused_kwargs): """Parse the transfer files. There is no direct relationship between who sends the file and who accepts the file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: the row with all information related with the file transfers. query: Optional query string. The default is None. cache: a cache object (instance of SQLiteCache). database: A database object (instance of SQLiteDatabase). """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". source_dict = cache.GetResults(u'source') if not source_dict: cursor = database.cursor results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". cache.CacheQueryResults( results, 'source', 'pk_id', ('skypeid', 'skypename')) source_dict = cache.GetResults(u'source') dest_dict = cache.GetResults(u'destination') if not dest_dict: cursor = database.cursor results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". cache.CacheQueryResults( results, 'destination', 'parent_id', ('skypeid', 'skypename')) dest_dict = cache.GetResults(u'destination') source = u'Unknown' destination = u'Unknown' if row['parent_id']: destination = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) skype_id, skype_name = source_dict.get(row['parent_id'], [None, None]) if skype_name: source = u'{0:s} <{1:s}>'.format(skype_id, skype_name) else: source = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) if row['pk_id']: skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None]) if skype_name: destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name) if row['status'] == 8: if row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'GETSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['accepttime']: event_object = SkypeTransferFileEvent( row, row['accepttime'], u'ACCEPTED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['finishtime']: event_object = SkypeTransferFileEvent( row, row['finishtime'], u'FINISHED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) elif row['status'] == 2 and row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'SENDSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query)
369
446
# -*- coding: utf-8 -*- """This file contains a basic Skype SQLite parser.""" import logging from plaso.events import time_events from plaso.parsers import sqlite from plaso.parsers.sqlite_plugins import interface __author__ = 'Joaquin Moreno Garijo ([email protected])' class SkypeChatEvent(time_events.PosixTimeEvent): """Convenience class for a Skype event.""" DATA_TYPE = u'skype:event:chat' def __init__(self, row, to_account): """Build a Skype Event from a single row. Args: row: A row object (instance of sqlite3.Row) that contains the extracted data from a single row in the database. to_account: A string containing the accounts (excluding the author) of the conversation. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeChatEvent, self).__init__( row['timestamp'], u'Chat from Skype', self.DATA_TYPE) self.title = row['title'] self.text = row['body_xml'] self.from_account = u'{0:s} <{1:s}>'.format( row['from_displayname'], row['author']) self.to_account = to_account class SkypeAccountEvent(time_events.PosixTimeEvent): """Convenience class for account information.""" DATA_TYPE = u'skype:event:account' def __init__( self, timestamp, usage, identifier, full_name, display_name, email, country): """Initialize the event. Args: timestamp: The POSIX timestamp value. usage: A string containing the description string of the timestamp. identifier: The row identifier. full_name: A string containing the full name of the Skype account holder. display_name: A string containing the chosen display name of the account holder. email: A string containing the registered email address of the account holder. country: A string containing the chosen home country of the account holder. """ super(SkypeAccountEvent, self).__init__(timestamp, usage) self.offset = identifier self.username = u'{0:s} <{1:s}>'.format(full_name, display_name) self.display_name = display_name self.email = email self.country = country self.data_type = self.DATA_TYPE class SkypeSMSEvent(time_events.PosixTimeEvent): """Convenience EventObject for SMS.""" DATA_TYPE = u'skype:event:sms' def __init__(self, row, dst_number): """Read the information related with the SMS. Args: row: row form the sql query. row['time_sms']: timestamp when the sms was send. row['dstnum_sms']: number which receives the sms. row['msg_sms']: text send to this sms. dst_number: phone number where the user send the sms. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeSMSEvent, self).__init__( row['time_sms'], u'SMS from Skype', self.DATA_TYPE) self.number = dst_number self.text = row['msg_sms'] class SkypeCallEvent(time_events.PosixTimeEvent): """Convenience EventObject for the calls.""" DATA_TYPE = u'skype:event:call' def __init__(self, timestamp, call_type, user_start_call, source, destination, video_conference): """Contains information if the call was cancelled, accepted or finished. Args: timestamp: the timestamp of the event. call_type: WAITING, STARTED, FINISHED. user_start_call: boolean, true indicates that the owner account started the call. source: the account which started the call. destination: the account which gets the call. video_conference: boolean, if is true it was a videoconference. 
""" super(SkypeCallEvent, self).__init__( timestamp, u'Call from Skype', self.DATA_TYPE) self.call_type = call_type self.user_start_call = user_start_call self.src_call = source self.dst_call = destination self.video_conference = video_conference class SkypeTransferFileEvent(time_events.PosixTimeEvent): """Evaluate the action of send a file.""" DATA_TYPE = u'skype:event:transferfile' def __init__(self, row, timestamp, action_type, source, destination): """Actions related with sending files. Args: row: filepath: path from the file. filename: name of the file. filesize: size of the file. timestamp: when the action happens. action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED. source: The account that sent the file. destination: The account that received the file. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". super(SkypeTransferFileEvent, self).__init__( timestamp, u'File transfer from Skype', self.DATA_TYPE) self.offset = row['id'] self.action_type = action_type self.source = source self.destination = destination self.transferred_filepath = row['filepath'] self.transferred_filename = row['filename'] try: self.transferred_filesize = int(row['filesize']) except ValueError: logging.debug(u'Unknown filesize {0:s}'.format( self.transferred_filename)) self.transferred_filesize = 0 class SkypePlugin(interface.SQLitePlugin): """SQLite plugin for Skype main.db SQlite database file.""" NAME = u'skype' DESCRIPTION = u'Parser for Skype SQLite database files.' # Queries for building cache. QUERY_DEST_FROM_TRANSFER = ( u'SELECT parent_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') QUERY_SOURCE_FROM_TRANSFER = ( u'SELECT pk_id, partner_handle AS skypeid, ' u'partner_dispname AS skypename FROM transfers') # Define the needed queries. QUERIES = [ ((u'SELECT c.id, c.participants, c.friendlyname AS title, ' u'm.author AS author, m.from_dispname AS from_displayname, ' u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m ' u'WHERE c.name = m.chatname'), u'ParseChat'), ((u'SELECT id, fullname, given_displayname, emails, ' u'country, profile_timestamp, authreq_timestamp, ' u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, ' u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'), ((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, ' u'body AS msg_sms FROM SMSes'), u'ParseSMS'), ((u'SELECT id, partner_handle, partner_dispname, offer_send_list, ' u'starttime, accepttime, finishtime, filepath, filename, filesize, ' u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'), ((u'SELECT c.id, cm.guid, c.is_incoming, ' u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, ' u'cm.start_timestamp AS accept_call, cm.call_duration ' u'FROM Calls c, CallMembers cm ' u'WHERE c.id = cm.call_db_id;'), u'ParseCall')] # The required tables. REQUIRED_TABLES = frozenset([ u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes', u'Transfers', u'CallMembers', u'Calls']) def ParseAccountInformation( self, parser_mediator, row, query=None, **unused_kwargs): """Parses the Accounts database. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
if row['profile_timestamp']: event_object = SkypeAccountEvent( row['profile_timestamp'], u'Profile Changed', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['authreq_timestamp']: event_object = SkypeAccountEvent( row['authreq_timestamp'], u'Authenticate Request', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastonline_timestamp']: event_object = SkypeAccountEvent( row['lastonline_timestamp'], u'Last Online', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['mood_timestamp']: event_object = SkypeAccountEvent( row['mood_timestamp'], u'Mood Event', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['sent_authrequest_time']: event_object = SkypeAccountEvent( row['sent_authrequest_time'], u'Auth Request Sent', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastused_timestamp']: event_object = SkypeAccountEvent( row['lastused_timestamp'], u'Last Used', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs): """Parses a chat message row. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". to_account = u'' accounts = [] participants = row['participants'].split(' ') for participant in participants: if participant != row['author']: accounts.append(participant) to_account = u', '.join(accounts) if not to_account: if row['dialog_partner']: to_account = row['dialog_partner'] else: to_account = u'Unknown User' event_object = SkypeChatEvent(row, to_account) parser_mediator.ProduceEvent(event_object, query=query) def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs): """Parse SMS. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". dst_number = row['dstnum_sms'].replace(u' ', u'') event_object = SkypeSMSEvent(row, dst_number) parser_mediator.ProduceEvent(event_object, query=query) def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs): """Parse the calls taking into accounts some rows. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None. """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
try: aux = row['guid'] if aux: aux_list = aux.split(u'-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = u'Unknown [no GUID]' dst_aux = u'Unknown [no GUID]' except IndexError: src_aux = u'Unknown [{0:s}]'.format(row['guid']) dst_aux = u'Unknown [{0:s}]'.format(row['guid']) if row['is_incoming'] == u'0': user_start_call = True source = src_aux if row['ip_address']: destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address']) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux if row['videostatus'] == u'3': video_conference = True else: video_conference = False event_object = SkypeCallEvent( row['try_call'], u'WAITING', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['accept_call']: event_object = SkypeCallEvent( row['accept_call'], u'ACCEPTED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['call_duration']: try: timestamp = int(row['accept_call']) + int(row['call_duration']) event_object = SkypeCallEvent( timestamp, u'FINISHED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) except ValueError: logging.debug(( u'[{0:s}] Unable to determine when the call {1:s} was ' u'finished.').format(self.NAME, row['id'])) def ParseFileTransfer( self, parser_mediator, row, cache=None, database=None, query=None, **unused_kwargs): """Parse the transfer files. There is no direct relationship between who sends the file and who accepts the file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: the row with all information related with the file transfers. query: Optional query string. The default is None. cache: a cache object (instance of SQLiteCache). database: A database object (instance of SQLiteDatabase). """ # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". source_dict = cache.GetResults(u'source') if not source_dict: cursor = database.cursor results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". cache.CacheQueryResults( results, 'source', 'pk_id', ('skypeid', 'skypename')) source_dict = cache.GetResults(u'source') dest_dict = cache.GetResults(u'destination') if not dest_dict: cursor = database.cursor results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER) # Note that pysqlite does not accept a Unicode string in row['string'] and # will raise "IndexError: Index must be int or string". 
cache.CacheQueryResults( results, 'destination', 'parent_id', ('skypeid', 'skypename')) dest_dict = cache.GetResults(u'destination') source = u'Unknown' destination = u'Unknown' if row['parent_id']: destination = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) skype_id, skype_name = source_dict.get(row['parent_id'], [None, None]) if skype_name: source = u'{0:s} <{1:s}>'.format(skype_id, skype_name) else: source = u'{0:s} <{1:s}>'.format( row['partner_handle'], row['partner_dispname']) if row['pk_id']: skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None]) if skype_name: destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name) if row['status'] == 8: if row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'GETSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['accepttime']: event_object = SkypeTransferFileEvent( row, row['accepttime'], u'ACCEPTED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['finishtime']: event_object = SkypeTransferFileEvent( row, row['finishtime'], u'FINISHED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) elif row['status'] == 2 and row['starttime']: event_object = SkypeTransferFileEvent( row, row['starttime'], u'SENDSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
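The ParseFileTransfer method above resolves the sender and receiver of a transfer through a cross-lookup between parent and child rows of the Transfers table, which is easy to misread. Below is a minimal, standalone sketch of just that resolution step, using plain dicts in place of plaso's SQLiteCache; the lookup contents and the example row are invented for illustration and are not taken from a real main.db.

def resolve_transfer_parties(row, source_by_pk_id, dest_by_parent_id):
    """Return (source, destination) strings for a single Transfers row.

    source_by_pk_id maps pk_id -> (skypeid, skypename) and dest_by_parent_id
    maps parent_id -> (skypeid, skypename), mirroring the two cache queries.
    """
    source = u'Unknown'
    destination = u'Unknown'

    if row['parent_id']:
        # Child row: the partner is the receiver; the sender is found by
        # looking the parent_id up among the pk_id-keyed entries.
        destination = u'{0:s} <{1:s}>'.format(
            row['partner_handle'], row['partner_dispname'])
        skype_id, skype_name = source_by_pk_id.get(
            row['parent_id'], (None, None))
        if skype_name:
            source = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
    else:
        # Parent row: the partner is the sender; the receiver is found by
        # looking the pk_id up among the parent_id-keyed entries.
        source = u'{0:s} <{1:s}>'.format(
            row['partner_handle'], row['partner_dispname'])
        if row['pk_id']:
            skype_id, skype_name = dest_by_parent_id.get(
                row['pk_id'], (None, None))
            if skype_name:
                destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name)

    return source, destination


if __name__ == '__main__':
    source_by_pk_id = {42: (u'alice.skype', u'Alice')}
    dest_by_parent_id = {7: (u'bob.skype', u'Bob')}
    row = {'parent_id': 42, 'pk_id': None,
           'partner_handle': u'bob.skype', 'partner_dispname': u'Bob'}
    # Prints: ('alice.skype <Alice>', 'bob.skype <Bob>')
    print(resolve_transfer_parties(row, source_by_pk_id, dest_by_parent_id))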
set_figure_params
Set resolution/size, styling and format of figures. Parameters ---------- scanpy Init default values for :obj:`matplotlib.rcParams` suited for Scanpy. dpi Resolution of rendered figures - this influences the size of figures in notebooks. dpi_save Resolution of saved figures. This should typically be higher to achieve publication quality. frameon Add frames and axes labels to scatter plots. vector_friendly Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`. fontsize Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`. color_map Convenience method for setting the default color map. Ignored if `scanpy=False`. format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`) This sets the default format for saving figures: `file_format_figs`. transparent Save figures with transparent back ground. Sets `rcParams['savefig.transparent']`. ipython_format Only concerns the notebook/IPython environment; see :func:`~IPython.display.set_matplotlib_formats` for details.
import inspect import sys from enum import IntEnum from pathlib import Path from time import time from logging import getLevelName from typing import Tuple, Union, Any, List, Iterable, TextIO, Optional from . import logging from .logging import _set_log_level, _set_log_file, RootLogger _VERBOSITY_TO_LOGLEVEL = { 'error': 'ERROR', 'warning': 'WARNING', 'info': 'INFO', 'hint': 'HINT', 'debug': 'DEBUG', } # Python 3.7 ensures iteration order for v, level in enumerate(list(_VERBOSITY_TO_LOGLEVEL.values())): _VERBOSITY_TO_LOGLEVEL[v] = level class Verbosity(IntEnum): error = 0 warn = 1 info = 2 hint = 3 debug = 4 @property def level(self) -> int: # getLevelName(str) returns the int level… return getLevelName(_VERBOSITY_TO_LOGLEVEL[self]) def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]): if isinstance(var, types): return if isinstance(types, type): possible_types_str = types.__name__ else: type_names = [t.__name__ for t in types] possible_types_str = "{} or {}".format( ", ".join(type_names[:-1]), type_names[-1] ) raise TypeError(f"{varname} must be of type {possible_types_str}") class ScanpyConfig: """Config manager for scanpy. """ def __init__( self, *, verbosity: str = "warning", plot_suffix: str = "", file_format_data: str = "h5ad", file_format_figs: str = "pdf", autosave: bool = False, autoshow: bool = True, writedir: Union[str, Path] = "./write/", cachedir: Union[str, Path] = "./cache/", datasetdir: Union[str, Path] = "./data/", figdir: Union[str, Path] = "./figures/", max_memory=15, n_jobs=1, logfile: Union[str, Path, None] = None, categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"), _frameon: bool = True, _vector_friendly: bool = False, _low_resolution_warning: bool = True, ): # logging self._root_logger = RootLogger(logging.INFO) # level will be replaced self.logfile = logfile self.verbosity = verbosity # rest self.plot_suffix = plot_suffix self.file_format_data = file_format_data self.file_format_figs = file_format_figs self.autosave = autosave self.autoshow = autoshow self.writedir = writedir self.cachedir = cachedir self.datasetdir = datasetdir self.figdir = figdir self.max_memory = max_memory self.n_jobs = n_jobs self.categories_to_ignore = categories_to_ignore self._frameon = _frameon """bool: See set_figure_params.""" self._vector_friendly = _vector_friendly """Set to true if you want to include pngs in svgs and pdfs.""" self._low_resolution_warning = _low_resolution_warning """Print warning when saving a figure with low resolution.""" self._start = time() """Time when the settings module is first imported.""" self._previous_time = self._start """Variable for timing program parts.""" self._previous_memory_usage = -1 """Stores the previous memory usage.""" @property def verbosity(self) -> Verbosity: """ Verbosity level (default `warning`) Level 0: only show 'error' messages. Level 1: also show 'warning' messages. Level 2: also show 'info' messages. Level 3: also show 'hint' messages. Level 4: also show very detailed progress for 'debug'ging. """ return self._verbosity @verbosity.setter def verbosity(self, verbosity: Union[Verbosity, int, str]): verbosity_str_options = [ v for v in _VERBOSITY_TO_LOGLEVEL if isinstance(v, str) ] if isinstance(verbosity, Verbosity): self._verbosity = verbosity elif isinstance(verbosity, int): self._verbosity = Verbosity(verbosity) elif isinstance(verbosity, str): verbosity = verbosity.lower() if verbosity not in verbosity_str_options: raise ValueError( f"Cannot set verbosity to {verbosity}. 
" f"Accepted string values are: {verbosity_str_options}" ) else: self._verbosity = Verbosity(verbosity_str_options.index(verbosity)) else: _type_check(verbosity, "verbosity", (str, int)) _set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity]) @property def plot_suffix(self) -> str: """Global suffix that is appended to figure filenames. """ return self._plot_suffix @plot_suffix.setter def plot_suffix(self, plot_suffix: str): _type_check(plot_suffix, "plot_suffix", str) self._plot_suffix = plot_suffix @property def file_format_data(self) -> str: """File format for saving AnnData objects. Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad' (hdf5) for lossless saving. """ return self._file_format_data @file_format_data.setter def file_format_data(self, file_format: str): _type_check(file_format, "file_format_data", str) file_format_options = {"txt", "csv", "h5ad"} if file_format not in file_format_options: raise ValueError( f"Cannot set file_format_data to {file_format}. " f"Must be one of {file_format_options}" ) self._file_format_data = file_format @property def file_format_figs(self) -> str: """File format for saving figures. For example 'png', 'pdf' or 'svg'. Many other formats work as well (see `matplotlib.pyplot.savefig`). """ return self._file_format_figs @file_format_figs.setter def file_format_figs(self, figure_format: str): _type_check(figure_format, "figure_format_data", str) self._file_format_figs = figure_format @property def autosave(self) -> bool: """\ Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`). Do not show plots/figures interactively. """ return self._autosave @autosave.setter def autosave(self, autosave: bool): _type_check(autosave, "autosave", bool) self._autosave = autosave @property def autoshow(self) -> bool: """\ Automatically show figures if `autosave == False` (default `True`). There is no need to call the matplotlib pl.show() in this case. """ return self._autoshow @autoshow.setter def autoshow(self, autoshow: bool): _type_check(autoshow, "autoshow", bool) self._autoshow = autoshow @property def writedir(self) -> Path: """\ Directory where the function scanpy.write writes to by default. """ return self._writedir @writedir.setter def writedir(self, writedir: Union[str, Path]): _type_check(writedir, "writedir", (str, Path)) self._writedir = Path(writedir) @property def cachedir(self) -> Path: """\ Directory for cache files (default `'./cache/'`). """ return self._cachedir @cachedir.setter def cachedir(self, cachedir: Union[str, Path]): _type_check(cachedir, "cachedir", (str, Path)) self._cachedir = Path(cachedir) @property def datasetdir(self) -> Path: """\ Directory for example :mod:`~scanpy.datasets` (default `'./data/'`). """ return self._datasetdir @datasetdir.setter def datasetdir(self, datasetdir: Union[str, Path]): _type_check(datasetdir, "datasetdir", (str, Path)) self._datasetdir = Path(datasetdir).resolve() @property def figdir(self) -> Path: """\ Directory for saving figures (default `'./figures/'`). """ return self._figdir @figdir.setter def figdir(self, figdir: Union[str, Path]): _type_check(figdir, "figdir", (str, Path)) self._figdir = Path(figdir) @property def max_memory(self) -> Union[int, float]: """\ Maximal memory usage in Gigabyte. Is currently not well respected.... 
""" return self._max_memory @max_memory.setter def max_memory(self, max_memory: Union[int, float]): _type_check(max_memory, "max_memory", (int, float)) self._max_memory = max_memory @property def n_jobs(self) -> int: """\ Default number of jobs/ CPUs to use for parallel computing. """ return self._n_jobs @n_jobs.setter def n_jobs(self, n_jobs: int): _type_check(n_jobs, "n_jobs", int) self._n_jobs = n_jobs @property def logpath(self) -> Optional[Path]: """\ The file path `logfile` was set to. """ return self._logpath @logpath.setter def logpath(self, logpath: Union[str, Path, None]): _type_check(logpath, "logfile", (str, Path)) # set via “file object” branch of logfile.setter self.logfile = Path(logpath).open('a') self._logpath = Path(logpath) @property def logfile(self) -> TextIO: """\ The open file to write logs to. Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one. The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks and to :obj:`sys.stderr` otherwise. For backwards compatibility, setting it to `''` behaves like setting it to `None`. """ return self._logfile @logfile.setter def logfile(self, logfile: Union[str, Path, TextIO, None]): if not hasattr(logfile, 'write') and logfile: self.logpath = logfile else: # file object if not logfile: # None or '' logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr self._logfile = logfile self._logpath = None _set_log_file(self) @property def categories_to_ignore(self) -> List[str]: """\ Categories that are omitted in plotting etc. """ return self._categories_to_ignore @categories_to_ignore.setter def categories_to_ignore(self, categories_to_ignore: Iterable[str]): categories_to_ignore = list(categories_to_ignore) for i, cat in enumerate(categories_to_ignore): _type_check(cat, f"categories_to_ignore[{i}]", str) self._categories_to_ignore = categories_to_ignore # -------------------------------------------------------------------------------- # Functions # -------------------------------------------------------------------------------- # MASKED: set_figure_params function (lines 349-410) @staticmethod def _is_run_from_ipython(): """Determines whether run from Ipython. Only affects progress bars. """ try: __IPYTHON__ return True except NameError: return False def __str__(self) -> str: return '\n'.join( f'{k} = {v!r}' for k, v in inspect.getmembers(self) if not k.startswith("_") and not k == 'getdoc' ) settings = ScanpyConfig()
def set_figure_params( self, scanpy: bool = True, dpi: int = 80, dpi_save: int = 150, frameon: bool = True, vector_friendly: bool = True, fontsize: int = 14, color_map: Optional[str] = None, format: Union[str, Iterable[str]] = "pdf", transparent: bool = False, ipython_format: str = "png2x", ): """\ Set resolution/size, styling and format of figures. Parameters ---------- scanpy Init default values for :obj:`matplotlib.rcParams` suited for Scanpy. dpi Resolution of rendered figures - this influences the size of figures in notebooks. dpi_save Resolution of saved figures. This should typically be higher to achieve publication quality. frameon Add frames and axes labels to scatter plots. vector_friendly Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`. fontsize Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`. color_map Convenience method for setting the default color map. Ignored if `scanpy=False`. format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`) This sets the default format for saving figures: `file_format_figs`. transparent Save figures with transparent back ground. Sets `rcParams['savefig.transparent']`. ipython_format Only concerns the notebook/IPython environment; see :func:`~IPython.display.set_matplotlib_formats` for details. """ try: import IPython if isinstance(ipython_format, str): ipython_format = [ipython_format] IPython.display.set_matplotlib_formats(*ipython_format) except Exception: pass from matplotlib import rcParams self._vector_friendly = vector_friendly self.file_format_figs = format if dpi is not None: rcParams["figure.dpi"] = dpi if dpi_save is not None: rcParams["savefig.dpi"] = dpi_save if transparent is not None: rcParams["savefig.transparent"] = transparent if scanpy: from .plotting._rcmod import set_rcParams_scanpy set_rcParams_scanpy(fontsize=fontsize, color_map=color_map) self._frameon = frameon
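A short, hypothetical usage sketch for set_figure_params and the settings object it belongs to. It assumes the module is importable as scanpy._settings (the path the docstrings themselves reference) and that matplotlib and scanpy's plotting defaults are installed; the concrete parameter values are arbitrary.

from scanpy._settings import settings, Verbosity

# Higher-resolution notebook figures, SVG output, no frames on scatter plots.
settings.set_figure_params(dpi=100, dpi_save=300, frameon=False, format='svg')

# verbosity accepts a Verbosity member, an int, or one of the strings
# 'error', 'warning', 'info', 'hint', 'debug'.
settings.verbosity = 'hint'
assert settings.verbosity == Verbosity.hint

# Path-like settings are normalized to pathlib.Path by their setters.
settings.figdir = './figures/umap/'
print(settings.figdir)   # PosixPath('figures/umap') on POSIX systems
print(settings)          # __str__ renders members not starting with '_' as `name = value`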
349
410
import inspect import sys from enum import IntEnum from pathlib import Path from time import time from logging import getLevelName from typing import Tuple, Union, Any, List, Iterable, TextIO, Optional from . import logging from .logging import _set_log_level, _set_log_file, RootLogger _VERBOSITY_TO_LOGLEVEL = { 'error': 'ERROR', 'warning': 'WARNING', 'info': 'INFO', 'hint': 'HINT', 'debug': 'DEBUG', } # Python 3.7 ensures iteration order for v, level in enumerate(list(_VERBOSITY_TO_LOGLEVEL.values())): _VERBOSITY_TO_LOGLEVEL[v] = level class Verbosity(IntEnum): error = 0 warn = 1 info = 2 hint = 3 debug = 4 @property def level(self) -> int: # getLevelName(str) returns the int level… return getLevelName(_VERBOSITY_TO_LOGLEVEL[self]) def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]): if isinstance(var, types): return if isinstance(types, type): possible_types_str = types.__name__ else: type_names = [t.__name__ for t in types] possible_types_str = "{} or {}".format( ", ".join(type_names[:-1]), type_names[-1] ) raise TypeError(f"{varname} must be of type {possible_types_str}") class ScanpyConfig: """Config manager for scanpy. """ def __init__( self, *, verbosity: str = "warning", plot_suffix: str = "", file_format_data: str = "h5ad", file_format_figs: str = "pdf", autosave: bool = False, autoshow: bool = True, writedir: Union[str, Path] = "./write/", cachedir: Union[str, Path] = "./cache/", datasetdir: Union[str, Path] = "./data/", figdir: Union[str, Path] = "./figures/", max_memory=15, n_jobs=1, logfile: Union[str, Path, None] = None, categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"), _frameon: bool = True, _vector_friendly: bool = False, _low_resolution_warning: bool = True, ): # logging self._root_logger = RootLogger(logging.INFO) # level will be replaced self.logfile = logfile self.verbosity = verbosity # rest self.plot_suffix = plot_suffix self.file_format_data = file_format_data self.file_format_figs = file_format_figs self.autosave = autosave self.autoshow = autoshow self.writedir = writedir self.cachedir = cachedir self.datasetdir = datasetdir self.figdir = figdir self.max_memory = max_memory self.n_jobs = n_jobs self.categories_to_ignore = categories_to_ignore self._frameon = _frameon """bool: See set_figure_params.""" self._vector_friendly = _vector_friendly """Set to true if you want to include pngs in svgs and pdfs.""" self._low_resolution_warning = _low_resolution_warning """Print warning when saving a figure with low resolution.""" self._start = time() """Time when the settings module is first imported.""" self._previous_time = self._start """Variable for timing program parts.""" self._previous_memory_usage = -1 """Stores the previous memory usage.""" @property def verbosity(self) -> Verbosity: """ Verbosity level (default `warning`) Level 0: only show 'error' messages. Level 1: also show 'warning' messages. Level 2: also show 'info' messages. Level 3: also show 'hint' messages. Level 4: also show very detailed progress for 'debug'ging. """ return self._verbosity @verbosity.setter def verbosity(self, verbosity: Union[Verbosity, int, str]): verbosity_str_options = [ v for v in _VERBOSITY_TO_LOGLEVEL if isinstance(v, str) ] if isinstance(verbosity, Verbosity): self._verbosity = verbosity elif isinstance(verbosity, int): self._verbosity = Verbosity(verbosity) elif isinstance(verbosity, str): verbosity = verbosity.lower() if verbosity not in verbosity_str_options: raise ValueError( f"Cannot set verbosity to {verbosity}. 
" f"Accepted string values are: {verbosity_str_options}" ) else: self._verbosity = Verbosity(verbosity_str_options.index(verbosity)) else: _type_check(verbosity, "verbosity", (str, int)) _set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity]) @property def plot_suffix(self) -> str: """Global suffix that is appended to figure filenames. """ return self._plot_suffix @plot_suffix.setter def plot_suffix(self, plot_suffix: str): _type_check(plot_suffix, "plot_suffix", str) self._plot_suffix = plot_suffix @property def file_format_data(self) -> str: """File format for saving AnnData objects. Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad' (hdf5) for lossless saving. """ return self._file_format_data @file_format_data.setter def file_format_data(self, file_format: str): _type_check(file_format, "file_format_data", str) file_format_options = {"txt", "csv", "h5ad"} if file_format not in file_format_options: raise ValueError( f"Cannot set file_format_data to {file_format}. " f"Must be one of {file_format_options}" ) self._file_format_data = file_format @property def file_format_figs(self) -> str: """File format for saving figures. For example 'png', 'pdf' or 'svg'. Many other formats work as well (see `matplotlib.pyplot.savefig`). """ return self._file_format_figs @file_format_figs.setter def file_format_figs(self, figure_format: str): _type_check(figure_format, "figure_format_data", str) self._file_format_figs = figure_format @property def autosave(self) -> bool: """\ Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`). Do not show plots/figures interactively. """ return self._autosave @autosave.setter def autosave(self, autosave: bool): _type_check(autosave, "autosave", bool) self._autosave = autosave @property def autoshow(self) -> bool: """\ Automatically show figures if `autosave == False` (default `True`). There is no need to call the matplotlib pl.show() in this case. """ return self._autoshow @autoshow.setter def autoshow(self, autoshow: bool): _type_check(autoshow, "autoshow", bool) self._autoshow = autoshow @property def writedir(self) -> Path: """\ Directory where the function scanpy.write writes to by default. """ return self._writedir @writedir.setter def writedir(self, writedir: Union[str, Path]): _type_check(writedir, "writedir", (str, Path)) self._writedir = Path(writedir) @property def cachedir(self) -> Path: """\ Directory for cache files (default `'./cache/'`). """ return self._cachedir @cachedir.setter def cachedir(self, cachedir: Union[str, Path]): _type_check(cachedir, "cachedir", (str, Path)) self._cachedir = Path(cachedir) @property def datasetdir(self) -> Path: """\ Directory for example :mod:`~scanpy.datasets` (default `'./data/'`). """ return self._datasetdir @datasetdir.setter def datasetdir(self, datasetdir: Union[str, Path]): _type_check(datasetdir, "datasetdir", (str, Path)) self._datasetdir = Path(datasetdir).resolve() @property def figdir(self) -> Path: """\ Directory for saving figures (default `'./figures/'`). """ return self._figdir @figdir.setter def figdir(self, figdir: Union[str, Path]): _type_check(figdir, "figdir", (str, Path)) self._figdir = Path(figdir) @property def max_memory(self) -> Union[int, float]: """\ Maximal memory usage in Gigabyte. Is currently not well respected.... 
""" return self._max_memory @max_memory.setter def max_memory(self, max_memory: Union[int, float]): _type_check(max_memory, "max_memory", (int, float)) self._max_memory = max_memory @property def n_jobs(self) -> int: """\ Default number of jobs/ CPUs to use for parallel computing. """ return self._n_jobs @n_jobs.setter def n_jobs(self, n_jobs: int): _type_check(n_jobs, "n_jobs", int) self._n_jobs = n_jobs @property def logpath(self) -> Optional[Path]: """\ The file path `logfile` was set to. """ return self._logpath @logpath.setter def logpath(self, logpath: Union[str, Path, None]): _type_check(logpath, "logfile", (str, Path)) # set via “file object” branch of logfile.setter self.logfile = Path(logpath).open('a') self._logpath = Path(logpath) @property def logfile(self) -> TextIO: """\ The open file to write logs to. Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one. The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks and to :obj:`sys.stderr` otherwise. For backwards compatibility, setting it to `''` behaves like setting it to `None`. """ return self._logfile @logfile.setter def logfile(self, logfile: Union[str, Path, TextIO, None]): if not hasattr(logfile, 'write') and logfile: self.logpath = logfile else: # file object if not logfile: # None or '' logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr self._logfile = logfile self._logpath = None _set_log_file(self) @property def categories_to_ignore(self) -> List[str]: """\ Categories that are omitted in plotting etc. """ return self._categories_to_ignore @categories_to_ignore.setter def categories_to_ignore(self, categories_to_ignore: Iterable[str]): categories_to_ignore = list(categories_to_ignore) for i, cat in enumerate(categories_to_ignore): _type_check(cat, f"categories_to_ignore[{i}]", str) self._categories_to_ignore = categories_to_ignore # -------------------------------------------------------------------------------- # Functions # -------------------------------------------------------------------------------- def set_figure_params( self, scanpy: bool = True, dpi: int = 80, dpi_save: int = 150, frameon: bool = True, vector_friendly: bool = True, fontsize: int = 14, color_map: Optional[str] = None, format: Union[str, Iterable[str]] = "pdf", transparent: bool = False, ipython_format: str = "png2x", ): """\ Set resolution/size, styling and format of figures. Parameters ---------- scanpy Init default values for :obj:`matplotlib.rcParams` suited for Scanpy. dpi Resolution of rendered figures - this influences the size of figures in notebooks. dpi_save Resolution of saved figures. This should typically be higher to achieve publication quality. frameon Add frames and axes labels to scatter plots. vector_friendly Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`. fontsize Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`. color_map Convenience method for setting the default color map. Ignored if `scanpy=False`. format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`) This sets the default format for saving figures: `file_format_figs`. transparent Save figures with transparent back ground. Sets `rcParams['savefig.transparent']`. ipython_format Only concerns the notebook/IPython environment; see :func:`~IPython.display.set_matplotlib_formats` for details. 
""" try: import IPython if isinstance(ipython_format, str): ipython_format = [ipython_format] IPython.display.set_matplotlib_formats(*ipython_format) except Exception: pass from matplotlib import rcParams self._vector_friendly = vector_friendly self.file_format_figs = format if dpi is not None: rcParams["figure.dpi"] = dpi if dpi_save is not None: rcParams["savefig.dpi"] = dpi_save if transparent is not None: rcParams["savefig.transparent"] = transparent if scanpy: from .plotting._rcmod import set_rcParams_scanpy set_rcParams_scanpy(fontsize=fontsize, color_map=color_map) self._frameon = frameon @staticmethod def _is_run_from_ipython(): """Determines whether run from Ipython. Only affects progress bars. """ try: __IPYTHON__ return True except NameError: return False def __str__(self) -> str: return '\n'.join( f'{k} = {v!r}' for k, v in inspect.getmembers(self) if not k.startswith("_") and not k == 'getdoc' ) settings = ScanpyConfig()
request_endpoint
Request a transcription from the speech service endpoint for a single audio file

    Args:
        audio: Path to the audio file to be transcribed
        speech_config: SpeechConfig object carrying the subscription key, region and service properties
        output_directory: Output directory for the transcription file
        lexical: Boolean to enable the extended lexical version of the STT result

    Returns:
        text: Processed recognition as string
        filename: Name of the processed audio file
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API ''' ''' [email protected] ''' # Import required packages import os import glob import json import logging import codecs import helper as he import azure.cognitiveservices.speech as speechsdk import params as pa # Load and set configuration parameters pa.get_config() # MASKED: request_endpoint function (lines 17-35) def process_recognition(result, filename, output_directory, lexical): """Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string """ if result.reason == speechsdk.ResultReason.RecognizedSpeech: if lexical: text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f"{format(result.text)}" logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}") elif result.reason == speechsdk.ResultReason.NoMatch: logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}") text = "" elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: logging.error(f"Error details: {cancellation_details.error_details}") text = "" return text # General Function def write_transcription(output_directory, text): """Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file """ if not os.path.exists(f'{output_directory}/transcriptions.txt'): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile: transfile.write(f'{text}\n') transfile.close() def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv): """Main function for STT-functionality Args: speech_files: Directory of audio files to be transcribed output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result enable_proxy: Boolean to enable proxy function in case you need it *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str Returns: zip(filenames, results): Zipped lists of filenames and STT-results as string """ try: speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region']) except RuntimeError: logging.error("[ERROR] - Could not retrieve speech config") # If necessary, you can enable a proxy here: # set_proxy(hostname: str, port: str, username: str, password: str) if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if pa.config_data['stt_endpoint'] != "": speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} 
audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): result, filename = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) # Check the result return zip(filenames, results) if __name__ == '__main__': main("input/audio/", "output/test/")
def request_endpoint(audio, speech_config, output_directory, lexical):
    """Request a transcription from the speech service endpoint for a single audio file

    Args:
        audio: Path to the audio file to be transcribed
        speech_config: SpeechConfig object carrying the subscription key, region and service properties
        output_directory: Output directory for the transcription file
        lexical: Boolean to enable the extended lexical version of the STT result

    Returns:
        text: Processed recognition as string
        filename: Name of the processed audio file
    """
    audio_config = speechsdk.audio.AudioConfig(filename = audio)
    speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
    result = speech_recognizer.recognize_once()
    filename = audio[audio.rindex('\\')+1:]
    text = process_recognition(result, filename, output_directory, lexical)
    return text, filename
17
35
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API ''' ''' [email protected] ''' # Import required packages import os import glob import json import logging import codecs import helper as he import azure.cognitiveservices.speech as speechsdk import params as pa # Load and set configuration parameters pa.get_config() def request_endpoint(audio, speech_config, output_directory, lexical): """Request the speech service endpoint Args: audio: Input data frame speech_config: Choice between scoring and output_folder: LUIS app ID case: LUIS subscription key lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00 Returns: df: Scoring data frame with predicted intents and scores Raises: ConnectionError: If file is not found """ audio_config = speechsdk.audio.AudioConfig(filename = audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config) result = speech_recognizer.recognize_once() filename = audio[audio.rindex('\\')+1:] text = process_recognition(result, filename, output_directory, lexical) return text, filename def process_recognition(result, filename, output_directory, lexical): """Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string """ if result.reason == speechsdk.ResultReason.RecognizedSpeech: if lexical: text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f"{format(result.text)}" logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}") elif result.reason == speechsdk.ResultReason.NoMatch: logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}") text = "" elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: logging.error(f"Error details: {cancellation_details.error_details}") text = "" return text # General Function def write_transcription(output_directory, text): """Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file """ if not os.path.exists(f'{output_directory}/transcriptions.txt'): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile: transfile.write(f'{text}\n') transfile.close() def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv): """Main function for STT-functionality Args: speech_files: Directory of audio files to be transcribed output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result enable_proxy: Boolean to enable proxy function in case you need it *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str Returns: zip(filenames, results): Zipped lists of filenames and STT-results as string """ try: speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = 
pa.config_data['stt_region']) except RuntimeError: logging.error("[ERROR] - Could not retrieve speech config") # If necessary, you can enable a proxy here: # set_proxy(hostname: str, port: str, username: str, password: str) if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if pa.config_data['stt_endpoint'] != "": speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): result, filename = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) # Check the result return zip(filenames, results) if __name__ == '__main__': main("input/audio/", "output/test/")
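A hypothetical driver for the speech-to-text module above. The module name custom_speech_stt is an assumption (the original filename is not given), and it presumes the local helper and params modules are importable and that params.get_config() loads a valid Azure Speech key and region.

# Importing the module runs pa.get_config() at load time; main() itself is
# guarded by `if __name__ == '__main__'`, so nothing is transcribed until we call it.
import custom_speech_stt as stt  # hypothetical module name for the file above

OUTPUT_DIR = "output/test/"

# main() returns an iterable of (filename, recognized_text) pairs;
# write_transcription() appends one line per call to transcriptions.txt.
for filename, text in stt.main("input/audio/", OUTPUT_DIR, lexical=True):
    if text:  # empty string means recognition failed or was canceled
        stt.write_transcription(OUTPUT_DIR, f"{filename}\t{text}")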
write_transcription
Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API ''' ''' [email protected] ''' # Import required packages import os import glob import json import logging import codecs import helper as he import azure.cognitiveservices.speech as speechsdk import params as pa # Load and set configuration parameters pa.get_config() def request_endpoint(audio, speech_config, output_directory, lexical): """Request the speech service endpoint Args: audio: Input data frame speech_config: Choice between scoring and output_folder: LUIS app ID case: LUIS subscription key lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00 Returns: df: Scoring data frame with predicted intents and scores Raises: ConnectionError: If file is not found """ audio_config = speechsdk.audio.AudioConfig(filename = audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config) result = speech_recognizer.recognize_once() filename = audio[audio.rindex('\\')+1:] text = process_recognition(result, filename, output_directory, lexical) return text, filename def process_recognition(result, filename, output_directory, lexical): """Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string """ if result.reason == speechsdk.ResultReason.RecognizedSpeech: if lexical: text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f"{format(result.text)}" logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}") elif result.reason == speechsdk.ResultReason.NoMatch: logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}") text = "" elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: logging.error(f"Error details: {cancellation_details.error_details}") text = "" return text # General Function # MASKED: write_transcription function (lines 65-79) def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv): """Main function for STT-functionality Args: speech_files: Directory of audio files to be transcribed output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result enable_proxy: Boolean to enable proxy function in case you need it *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str Returns: zip(filenames, results): Zipped lists of filenames and STT-results as string """ try: speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region']) except RuntimeError: logging.error("[ERROR] - Could not retrieve speech config") # If necessary, you can enable a proxy here: # set_proxy(hostname: str, port: str, username: str, password: str) if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if 
pa.config_data['stt_endpoint'] != "": speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): result, filename = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) # Check the result return zip(filenames, results) if __name__ == '__main__': main("input/audio/", "output/test/")
def write_transcription(output_directory, text):
    """Write transcription to file

    Args:
        text: Processed recognition as string
        output_directory: Output directory for the file

    Returns:
        Writes output to file
    """
    # Create the transcription file with a UTF-8 BOM on first use
    if not os.path.exists(f'{output_directory}/transcriptions.txt'):
        transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig')
        transfile.close()
        logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.')
    # Append the recognized text; the context manager closes the file
    with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile:
        transfile.write(f'{text}\n')
65
79
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API ''' ''' [email protected] ''' # Import required packages import os import glob import json import logging import codecs import helper as he import azure.cognitiveservices.speech as speechsdk import params as pa # Load and set configuration parameters pa.get_config() def request_endpoint(audio, speech_config, output_directory, lexical): """Request the speech service endpoint Args: audio: Input data frame speech_config: Choice between scoring and output_folder: LUIS app ID case: LUIS subscription key lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00 Returns: df: Scoring data frame with predicted intents and scores Raises: ConnectionError: If file is not found """ audio_config = speechsdk.audio.AudioConfig(filename = audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config) result = speech_recognizer.recognize_once() filename = audio[audio.rindex('\\')+1:] text = process_recognition(result, filename, output_directory, lexical) return text, filename def process_recognition(result, filename, output_directory, lexical): """Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string """ if result.reason == speechsdk.ResultReason.RecognizedSpeech: if lexical: text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f"{format(result.text)}" logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}") elif result.reason == speechsdk.ResultReason.NoMatch: logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}") text = "" elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: logging.error(f"Error details: {cancellation_details.error_details}") text = "" return text # General Function def write_transcription(output_directory, text): """Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file """ if not os.path.exists(f'{output_directory}/transcriptions.txt'): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile: transfile.write(f'{text}\n') transfile.close() def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv): """Main function for STT-functionality Args: speech_files: Directory of audio files to be transcribed output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result enable_proxy: Boolean to enable proxy function in case you need it *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str Returns: zip(filenames, results): Zipped lists of filenames and STT-results as string """ try: speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = 
pa.config_data['stt_region']) except RuntimeError: logging.error("[ERROR] - Could not retrieve speech config") # If necessary, you can enable a proxy here: # set_proxy(hostname: str, port: str, username: str, password: str) if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if pa.config_data['stt_endpoint'] != "": speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): result, filename = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) # Check the result return zip(filenames, results) if __name__ == '__main__': main("input/audio/", "output/test/")
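A minimal usage sketch for write_transcription, assuming the module above is importable and the output directory already exists; the directory path and sample strings below are placeholders, not values from the original project.

import logging

logging.basicConfig(level=logging.INFO)

output_directory = "output/test"  # placeholder; the folder must already exist
samples = ["hello world\thello world", "second utterance\tsecond utterance"]  # placeholder texts
for text in samples:
    # Each call appends one line to output/test/transcriptions.txt and
    # creates the file with a UTF-8 BOM on the first call.
    write_transcription(output_directory, text)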
main
Main function for STT-functionality

Args:
    speech_files: Directory of audio files to be transcribed
    output_directory: Output directory for the file
    lexical: Boolean to enable extended lexical version of STT-result
    enable_proxy: Boolean to enable proxy function in case you need it
    *argv: Proxy information if enable_proxy is True ->
        hostname: str, port: str, username: str, password: str

Returns:
    zip(filenames, results): Zipped lists of filenames and STT-results as string
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API ''' ''' [email protected] ''' # Import required packages import os import glob import json import logging import codecs import helper as he import azure.cognitiveservices.speech as speechsdk import params as pa # Load and set configuration parameters pa.get_config() def request_endpoint(audio, speech_config, output_directory, lexical): """Request the speech service endpoint Args: audio: Input data frame speech_config: Choice between scoring and output_folder: LUIS app ID case: LUIS subscription key lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00 Returns: df: Scoring data frame with predicted intents and scores Raises: ConnectionError: If file is not found """ audio_config = speechsdk.audio.AudioConfig(filename = audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config) result = speech_recognizer.recognize_once() filename = audio[audio.rindex('\\')+1:] text = process_recognition(result, filename, output_directory, lexical) return text, filename def process_recognition(result, filename, output_directory, lexical): """Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string """ if result.reason == speechsdk.ResultReason.RecognizedSpeech: if lexical: text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f"{format(result.text)}" logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}") elif result.reason == speechsdk.ResultReason.NoMatch: logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}") text = "" elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: logging.error(f"Error details: {cancellation_details.error_details}") text = "" return text # General Function def write_transcription(output_directory, text): """Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file """ if not os.path.exists(f'{output_directory}/transcriptions.txt'): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile: transfile.write(f'{text}\n') transfile.close() # MASKED: main function (lines 81-112) if __name__ == '__main__': main("input/audio/", "output/test/")
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
    """Main function for STT-functionality

    Args:
        speech_files: Directory of audio files to be transcribed
        output_directory: Output directory for the file
        lexical: Boolean to enable extended lexical version of STT-result
        enable_proxy: Boolean to enable proxy function in case you need it
        *argv: Proxy information if enable_proxy is True ->
            hostname: str, port: str, username: str, password: str

    Returns:
        zip(filenames, results): Zipped lists of filenames and STT-results as string
    """
    try:
        speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
    except RuntimeError:
        logging.error("[ERROR] - Could not retrieve speech config")
    # If necessary, you can enable a proxy here:
    # set_proxy(hostname: str, port: str, username: str, password: str)
    if enable_proxy:
        speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
    # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted
    speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
    if pa.config_data['stt_endpoint'] != "":
        speech_config.endpoint_id = pa.config_data['stt_endpoint']
    logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
    results = []
    filenames = []
    for audio in glob.iglob(f'{speech_files}*av'):
        result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
        results.append(result)
        filenames.append(filename)
    # Check the result
    return zip(filenames, results)
81
112
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API ''' ''' [email protected] ''' # Import required packages import os import glob import json import logging import codecs import helper as he import azure.cognitiveservices.speech as speechsdk import params as pa # Load and set configuration parameters pa.get_config() def request_endpoint(audio, speech_config, output_directory, lexical): """Request the speech service endpoint Args: audio: Input data frame speech_config: Choice between scoring and output_folder: LUIS app ID case: LUIS subscription key lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00 Returns: df: Scoring data frame with predicted intents and scores Raises: ConnectionError: If file is not found """ audio_config = speechsdk.audio.AudioConfig(filename = audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config) result = speech_recognizer.recognize_once() filename = audio[audio.rindex('\\')+1:] text = process_recognition(result, filename, output_directory, lexical) return text, filename def process_recognition(result, filename, output_directory, lexical): """Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string """ if result.reason == speechsdk.ResultReason.RecognizedSpeech: if lexical: text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f"{format(result.text)}" logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}") elif result.reason == speechsdk.ResultReason.NoMatch: logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}") text = "" elif result.reason == speechsdk.ResultReason.Canceled: cancellation_details = result.cancellation_details logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}") if cancellation_details.reason == speechsdk.CancellationReason.Error: logging.error(f"Error details: {cancellation_details.error_details}") text = "" return text # General Function def write_transcription(output_directory, text): """Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file """ if not os.path.exists(f'{output_directory}/transcriptions.txt'): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile: transfile.write(f'{text}\n') transfile.close() def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv): """Main function for STT-functionality Args: speech_files: Directory of audio files to be transcribed output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result enable_proxy: Boolean to enable proxy function in case you need it *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str Returns: zip(filenames, results): Zipped lists of filenames and STT-results as string """ try: speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = 
pa.config_data['stt_region']) except RuntimeError: logging.error("[ERROR] - Could not retrieve speech config") # If necessary, you can enable a proxy here: # set_proxy(hostname: str, port: str, username: str, password: str) if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if pa.config_data['stt_endpoint'] != "": speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): result, filename = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) # Check the result return zip(filenames, results) if __name__ == '__main__': main("input/audio/", "output/test/")
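A hedged sketch of driving main directly rather than through the __main__ block. It assumes pa.config_data already holds valid stt_key, stt_region and stt_endpoint entries; the proxy values in the commented call are placeholders.

# Transcribe every matching file with the extended lexical output enabled.
transcriptions = main("input/audio/", "output/test/", lexical=True)
for filename, text in transcriptions:
    print(f"{filename}: {text}")

# Behind a proxy (hostname, port and credentials below are placeholders):
# main("input/audio/", "output/test/", False, True,
#      "proxy.example.com", "8080", "user", "password")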
save_checkpoint
Save model, optimizer, scheduler and training stats to file.

Args:
  params:
    It is returned by :func:`get_params`.
  model:
    The training model.
#!/usr/bin/env python3 # Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang # Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging from pathlib import Path from shutil import copyfile from typing import Optional, Tuple import k2 import torch import torch.multiprocessing as mp import torch.nn as nn import torch.optim as optim from asr_datamodule import LibriSpeechAsrDataModule from lhotse.utils import fix_random_seed from model import TdnnLstm from torch import Tensor from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.utils import clip_grad_norm_ from torch.optim.lr_scheduler import StepLR from torch.utils.tensorboard import SummaryWriter from icefall.checkpoint import load_checkpoint from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.dist import cleanup_dist, setup_dist from icefall.graph_compiler import CtcTrainingGraphCompiler from icefall.lexicon import Lexicon from icefall.utils import ( AttributeDict, MetricsTracker, encode_supervisions, get_env_info, setup_logger, str2bool, ) def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( "--world-size", type=int, default=1, help="Number of GPUs for DDP training.", ) parser.add_argument( "--master-port", type=int, default=12354, help="Master port to use for DDP training.", ) parser.add_argument( "--tensorboard", type=str2bool, default=True, help="Should various information be logged in tensorboard.", ) parser.add_argument( "--num-epochs", type=int, default=20, help="Number of epochs to train.", ) parser.add_argument( "--start-epoch", type=int, default=0, help="""Resume training from from this epoch. If it is positive, it will load checkpoint from tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt """, ) return parser def get_params() -> AttributeDict: """Return a dict containing training parameters. All training related parameters that are not passed from the commandline is saved in the variable `params`. Commandline options are merged into `params` after they are parsed, so you can also access them via `params`. Explanation of options saved in `params`: - exp_dir: It specifies the directory where all training related files, e.g., checkpoints, log, etc, are saved - lang_dir: It contains language related input files such as "lexicon.txt" - lr: It specifies the initial learning rate - feature_dim: The model input dim. It has to match the one used in computing features. - weight_decay: The weight_decay for the optimizer. - subsampling_factor: The subsampling factor for the model. - best_train_loss: Best training loss so far. It is used to select the model that has the lowest training loss. It is updated during the training. - best_valid_loss: Best validation loss so far. It is used to select the model that has the lowest validation loss. It is updated during the training. 
- best_train_epoch: It is the epoch that has the best training loss. - best_valid_epoch: It is the epoch that has the best validation loss. - batch_idx_train: Used to writing statistics to tensorboard. It contains number of batches trained so far across epochs. - log_interval: Print training loss if batch_idx % log_interval` is 0 - reset_interval: Reset statistics if batch_idx % reset_interval is 0 - valid_interval: Run validation if batch_idx % valid_interval` is 0 - beam_size: It is used in k2.ctc_loss - reduction: It is used in k2.ctc_loss - use_double_scores: It is used in k2.ctc_loss """ params = AttributeDict( { "exp_dir": Path("tdnn_lstm_ctc/exp"), "lang_dir": Path("data/lang_phone"), "lr": 1e-3, "feature_dim": 80, "weight_decay": 5e-4, "subsampling_factor": 3, "best_train_loss": float("inf"), "best_valid_loss": float("inf"), "best_train_epoch": -1, "best_valid_epoch": -1, "batch_idx_train": 0, "log_interval": 10, "reset_interval": 200, "valid_interval": 1000, "beam_size": 10, "reduction": "sum", "use_double_scores": True, "env_info": get_env_info(), } ) return params def load_checkpoint_if_available( params: AttributeDict, model: nn.Module, optimizer: Optional[torch.optim.Optimizer] = None, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, ) -> None: """Load checkpoint from file. If params.start_epoch is positive, it will load the checkpoint from `params.start_epoch - 1`. Otherwise, this function does nothing. Apart from loading state dict for `model`, `optimizer` and `scheduler`, it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, and `best_valid_loss` in `params`. Args: params: The return value of :func:`get_params`. model: The training model. optimizer: The optimizer that we are using. scheduler: The learning rate scheduler we are using. Returns: Return None. """ if params.start_epoch <= 0: return filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" saved_params = load_checkpoint( filename, model=model, optimizer=optimizer, scheduler=scheduler, ) keys = [ "best_train_epoch", "best_valid_epoch", "batch_idx_train", "best_train_loss", "best_valid_loss", ] for k in keys: params[k] = saved_params[k] return saved_params # MASKED: save_checkpoint function (lines 232-265) def compute_loss( params: AttributeDict, model: nn.Module, batch: dict, graph_compiler: CtcTrainingGraphCompiler, is_training: bool, ) -> Tuple[Tensor, MetricsTracker]: """ Compute CTC loss given the model and its inputs. Args: params: Parameters for training. See :func:`get_params`. model: The model for training. It is an instance of TdnnLstm in our case. batch: A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` for the content in it. graph_compiler: It is used to build a decoding graph from a ctc topo and training transcript. The training transcript is contained in the given `batch`, while the ctc topo is built when this compiler is instantiated. is_training: True for training. False for validation. When it is True, this function enables autograd during computation; when it is False, it disables autograd. 
""" device = graph_compiler.device feature = batch["inputs"] # at entry, feature is (N, T, C) feature = feature.permute(0, 2, 1) # now feature is (N, C, T) assert feature.ndim == 3 feature = feature.to(device) with torch.set_grad_enabled(is_training): nnet_output = model(feature) # nnet_output is (N, T, C) # NOTE: We need `encode_supervisions` to sort sequences with # different duration in decreasing order, required by # `k2.intersect_dense` called in `k2.ctc_loss` supervisions = batch["supervisions"] supervision_segments, texts = encode_supervisions( supervisions, subsampling_factor=params.subsampling_factor ) decoding_graph = graph_compiler.compile(texts) dense_fsa_vec = k2.DenseFsaVec( nnet_output, supervision_segments, allow_truncate=params.subsampling_factor - 1, ) loss = k2.ctc_loss( decoding_graph=decoding_graph, dense_fsa_vec=dense_fsa_vec, output_beam=params.beam_size, reduction=params.reduction, use_double_scores=params.use_double_scores, ) assert loss.requires_grad == is_training info = MetricsTracker() info["frames"] = supervision_segments[:, 2].sum().item() info["loss"] = loss.detach().cpu().item() return loss, info def compute_validation_loss( params: AttributeDict, model: nn.Module, graph_compiler: CtcTrainingGraphCompiler, valid_dl: torch.utils.data.DataLoader, world_size: int = 1, ) -> MetricsTracker: """Run the validation process. The validation loss is saved in `params.valid_loss`. """ model.eval() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(valid_dl): loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=False, ) assert loss.requires_grad is False tot_loss = tot_loss + loss_info if world_size > 1: tot_loss.reduce(loss.device) loss_value = tot_loss["loss"] / tot_loss["frames"] if loss_value < params.best_valid_loss: params.best_valid_epoch = params.cur_epoch params.best_valid_loss = loss_value return tot_loss def train_one_epoch( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, graph_compiler: CtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, valid_dl: torch.utils.data.DataLoader, tb_writer: Optional[SummaryWriter] = None, world_size: int = 1, ) -> None: """Train the model for one epoch. The training loss from the mean of all frames is saved in `params.train_loss`. It runs the validation process every `params.valid_interval` batches. Args: params: It is returned by :func:`get_params`. model: The model for training. optimizer: The optimizer we are using. graph_compiler: It is used to convert transcripts to FSAs. train_dl: Dataloader for the training dataset. valid_dl: Dataloader for the validation dataset. tb_writer: Writer to write log messages to tensorboard. world_size: Number of nodes in DDP training. If it is 1, DDP is disabled. """ model.train() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=True, ) # summary stats. 
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info optimizer.zero_grad() loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() if batch_idx % params.log_interval == 0: logging.info( f"Epoch {params.cur_epoch}, " f"batch {batch_idx}, loss[{loss_info}], " f"tot_loss[{tot_loss}], batch size: {batch_size}" ) if batch_idx % params.log_interval == 0: if tb_writer is not None: loss_info.write_summary( tb_writer, "train/current_", params.batch_idx_train ) tot_loss.write_summary( tb_writer, "train/tot_", params.batch_idx_train ) if batch_idx > 0 and batch_idx % params.valid_interval == 0: valid_info = compute_validation_loss( params=params, model=model, graph_compiler=graph_compiler, valid_dl=valid_dl, world_size=world_size, ) model.train() logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}") if tb_writer is not None: valid_info.write_summary( tb_writer, "train/valid_", params.batch_idx_train, ) loss_value = tot_loss["loss"] / tot_loss["frames"] params.train_loss = loss_value if params.train_loss < params.best_train_loss: params.best_train_epoch = params.cur_epoch params.best_train_loss = params.train_loss def run(rank, world_size, args): """ Args: rank: It is a value between 0 and `world_size-1`, which is passed automatically by `mp.spawn()` in :func:`main`. The node with rank 0 is responsible for saving checkpoint. world_size: Number of GPUs for DDP training. args: The return value of get_parser().parse_args() """ params = get_params() params.update(vars(args)) fix_random_seed(42) if world_size > 1: setup_dist(rank, world_size, params.master_port) setup_logger(f"{params.exp_dir}/log/log-train") logging.info("Training started") logging.info(params) if args.tensorboard and rank == 0: tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") else: tb_writer = None lexicon = Lexicon(params.lang_dir) max_phone_id = max(lexicon.tokens) device = torch.device("cpu") if torch.cuda.is_available(): device = torch.device("cuda", rank) graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device) model = TdnnLstm( num_features=params.feature_dim, num_classes=max_phone_id + 1, # +1 for the blank symbol subsampling_factor=params.subsampling_factor, ) checkpoints = load_checkpoint_if_available(params=params, model=model) model.to(device) if world_size > 1: model = DDP(model, device_ids=[rank]) optimizer = optim.AdamW( model.parameters(), lr=params.lr, weight_decay=params.weight_decay, ) scheduler = StepLR(optimizer, step_size=8, gamma=0.1) if checkpoints: optimizer.load_state_dict(checkpoints["optimizer"]) scheduler.load_state_dict(checkpoints["scheduler"]) librispeech = LibriSpeechAsrDataModule(args) train_dl = librispeech.train_dataloaders() valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}") if tb_writer is not None: tb_writer.add_scalar( "train/lr", scheduler.get_last_lr()[0], params.batch_idx_train, ) tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) params.cur_epoch = epoch train_one_epoch( params=params, model=model, optimizer=optimizer, graph_compiler=graph_compiler, train_dl=train_dl, valid_dl=valid_dl, tb_writer=tb_writer, world_size=world_size, ) scheduler.step() save_checkpoint( params=params, model=model, optimizer=optimizer, scheduler=scheduler, rank=rank, ) logging.info("Done!") if world_size > 1: torch.distributed.barrier() 
cleanup_dist() def main(): parser = get_parser() LibriSpeechAsrDataModule.add_arguments(parser) args = parser.parse_args() world_size = args.world_size assert world_size >= 1 if world_size > 1: mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) else: run(rank=0, world_size=1, args=args) if __name__ == "__main__": main()
def save_checkpoint(
    params: AttributeDict,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    scheduler: torch.optim.lr_scheduler._LRScheduler,
    rank: int = 0,
) -> None:
    """Save model, optimizer, scheduler and training stats to file.

    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The training model.
    """
    if rank != 0:
        return
    filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
    save_checkpoint_impl(
        filename=filename,
        model=model,
        params=params,
        optimizer=optimizer,
        scheduler=scheduler,
        rank=rank,
    )

    if params.best_train_epoch == params.cur_epoch:
        best_train_filename = params.exp_dir / "best-train-loss.pt"
        copyfile(src=filename, dst=best_train_filename)

    if params.best_valid_epoch == params.cur_epoch:
        best_valid_filename = params.exp_dir / "best-valid-loss.pt"
        copyfile(src=filename, dst=best_valid_filename)
232
265
#!/usr/bin/env python3 # Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang # Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging from pathlib import Path from shutil import copyfile from typing import Optional, Tuple import k2 import torch import torch.multiprocessing as mp import torch.nn as nn import torch.optim as optim from asr_datamodule import LibriSpeechAsrDataModule from lhotse.utils import fix_random_seed from model import TdnnLstm from torch import Tensor from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.utils import clip_grad_norm_ from torch.optim.lr_scheduler import StepLR from torch.utils.tensorboard import SummaryWriter from icefall.checkpoint import load_checkpoint from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.dist import cleanup_dist, setup_dist from icefall.graph_compiler import CtcTrainingGraphCompiler from icefall.lexicon import Lexicon from icefall.utils import ( AttributeDict, MetricsTracker, encode_supervisions, get_env_info, setup_logger, str2bool, ) def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( "--world-size", type=int, default=1, help="Number of GPUs for DDP training.", ) parser.add_argument( "--master-port", type=int, default=12354, help="Master port to use for DDP training.", ) parser.add_argument( "--tensorboard", type=str2bool, default=True, help="Should various information be logged in tensorboard.", ) parser.add_argument( "--num-epochs", type=int, default=20, help="Number of epochs to train.", ) parser.add_argument( "--start-epoch", type=int, default=0, help="""Resume training from from this epoch. If it is positive, it will load checkpoint from tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt """, ) return parser def get_params() -> AttributeDict: """Return a dict containing training parameters. All training related parameters that are not passed from the commandline is saved in the variable `params`. Commandline options are merged into `params` after they are parsed, so you can also access them via `params`. Explanation of options saved in `params`: - exp_dir: It specifies the directory where all training related files, e.g., checkpoints, log, etc, are saved - lang_dir: It contains language related input files such as "lexicon.txt" - lr: It specifies the initial learning rate - feature_dim: The model input dim. It has to match the one used in computing features. - weight_decay: The weight_decay for the optimizer. - subsampling_factor: The subsampling factor for the model. - best_train_loss: Best training loss so far. It is used to select the model that has the lowest training loss. It is updated during the training. - best_valid_loss: Best validation loss so far. It is used to select the model that has the lowest validation loss. It is updated during the training. 
- best_train_epoch: It is the epoch that has the best training loss. - best_valid_epoch: It is the epoch that has the best validation loss. - batch_idx_train: Used to writing statistics to tensorboard. It contains number of batches trained so far across epochs. - log_interval: Print training loss if batch_idx % log_interval` is 0 - reset_interval: Reset statistics if batch_idx % reset_interval is 0 - valid_interval: Run validation if batch_idx % valid_interval` is 0 - beam_size: It is used in k2.ctc_loss - reduction: It is used in k2.ctc_loss - use_double_scores: It is used in k2.ctc_loss """ params = AttributeDict( { "exp_dir": Path("tdnn_lstm_ctc/exp"), "lang_dir": Path("data/lang_phone"), "lr": 1e-3, "feature_dim": 80, "weight_decay": 5e-4, "subsampling_factor": 3, "best_train_loss": float("inf"), "best_valid_loss": float("inf"), "best_train_epoch": -1, "best_valid_epoch": -1, "batch_idx_train": 0, "log_interval": 10, "reset_interval": 200, "valid_interval": 1000, "beam_size": 10, "reduction": "sum", "use_double_scores": True, "env_info": get_env_info(), } ) return params def load_checkpoint_if_available( params: AttributeDict, model: nn.Module, optimizer: Optional[torch.optim.Optimizer] = None, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, ) -> None: """Load checkpoint from file. If params.start_epoch is positive, it will load the checkpoint from `params.start_epoch - 1`. Otherwise, this function does nothing. Apart from loading state dict for `model`, `optimizer` and `scheduler`, it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, and `best_valid_loss` in `params`. Args: params: The return value of :func:`get_params`. model: The training model. optimizer: The optimizer that we are using. scheduler: The learning rate scheduler we are using. Returns: Return None. """ if params.start_epoch <= 0: return filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" saved_params = load_checkpoint( filename, model=model, optimizer=optimizer, scheduler=scheduler, ) keys = [ "best_train_epoch", "best_valid_epoch", "batch_idx_train", "best_train_loss", "best_valid_loss", ] for k in keys: params[k] = saved_params[k] return saved_params def save_checkpoint( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler._LRScheduler, rank: int = 0, ) -> None: """Save model, optimizer, scheduler and training stats to file. Args: params: It is returned by :func:`get_params`. model: The training model. """ if rank != 0: return filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" save_checkpoint_impl( filename=filename, model=model, params=params, optimizer=optimizer, scheduler=scheduler, rank=rank, ) if params.best_train_epoch == params.cur_epoch: best_train_filename = params.exp_dir / "best-train-loss.pt" copyfile(src=filename, dst=best_train_filename) if params.best_valid_epoch == params.cur_epoch: best_valid_filename = params.exp_dir / "best-valid-loss.pt" copyfile(src=filename, dst=best_valid_filename) def compute_loss( params: AttributeDict, model: nn.Module, batch: dict, graph_compiler: CtcTrainingGraphCompiler, is_training: bool, ) -> Tuple[Tensor, MetricsTracker]: """ Compute CTC loss given the model and its inputs. Args: params: Parameters for training. See :func:`get_params`. model: The model for training. It is an instance of TdnnLstm in our case. batch: A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` for the content in it. 
graph_compiler: It is used to build a decoding graph from a ctc topo and training transcript. The training transcript is contained in the given `batch`, while the ctc topo is built when this compiler is instantiated. is_training: True for training. False for validation. When it is True, this function enables autograd during computation; when it is False, it disables autograd. """ device = graph_compiler.device feature = batch["inputs"] # at entry, feature is (N, T, C) feature = feature.permute(0, 2, 1) # now feature is (N, C, T) assert feature.ndim == 3 feature = feature.to(device) with torch.set_grad_enabled(is_training): nnet_output = model(feature) # nnet_output is (N, T, C) # NOTE: We need `encode_supervisions` to sort sequences with # different duration in decreasing order, required by # `k2.intersect_dense` called in `k2.ctc_loss` supervisions = batch["supervisions"] supervision_segments, texts = encode_supervisions( supervisions, subsampling_factor=params.subsampling_factor ) decoding_graph = graph_compiler.compile(texts) dense_fsa_vec = k2.DenseFsaVec( nnet_output, supervision_segments, allow_truncate=params.subsampling_factor - 1, ) loss = k2.ctc_loss( decoding_graph=decoding_graph, dense_fsa_vec=dense_fsa_vec, output_beam=params.beam_size, reduction=params.reduction, use_double_scores=params.use_double_scores, ) assert loss.requires_grad == is_training info = MetricsTracker() info["frames"] = supervision_segments[:, 2].sum().item() info["loss"] = loss.detach().cpu().item() return loss, info def compute_validation_loss( params: AttributeDict, model: nn.Module, graph_compiler: CtcTrainingGraphCompiler, valid_dl: torch.utils.data.DataLoader, world_size: int = 1, ) -> MetricsTracker: """Run the validation process. The validation loss is saved in `params.valid_loss`. """ model.eval() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(valid_dl): loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=False, ) assert loss.requires_grad is False tot_loss = tot_loss + loss_info if world_size > 1: tot_loss.reduce(loss.device) loss_value = tot_loss["loss"] / tot_loss["frames"] if loss_value < params.best_valid_loss: params.best_valid_epoch = params.cur_epoch params.best_valid_loss = loss_value return tot_loss def train_one_epoch( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, graph_compiler: CtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, valid_dl: torch.utils.data.DataLoader, tb_writer: Optional[SummaryWriter] = None, world_size: int = 1, ) -> None: """Train the model for one epoch. The training loss from the mean of all frames is saved in `params.train_loss`. It runs the validation process every `params.valid_interval` batches. Args: params: It is returned by :func:`get_params`. model: The model for training. optimizer: The optimizer we are using. graph_compiler: It is used to convert transcripts to FSAs. train_dl: Dataloader for the training dataset. valid_dl: Dataloader for the validation dataset. tb_writer: Writer to write log messages to tensorboard. world_size: Number of nodes in DDP training. If it is 1, DDP is disabled. """ model.train() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=True, ) # summary stats. 
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info optimizer.zero_grad() loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() if batch_idx % params.log_interval == 0: logging.info( f"Epoch {params.cur_epoch}, " f"batch {batch_idx}, loss[{loss_info}], " f"tot_loss[{tot_loss}], batch size: {batch_size}" ) if batch_idx % params.log_interval == 0: if tb_writer is not None: loss_info.write_summary( tb_writer, "train/current_", params.batch_idx_train ) tot_loss.write_summary( tb_writer, "train/tot_", params.batch_idx_train ) if batch_idx > 0 and batch_idx % params.valid_interval == 0: valid_info = compute_validation_loss( params=params, model=model, graph_compiler=graph_compiler, valid_dl=valid_dl, world_size=world_size, ) model.train() logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}") if tb_writer is not None: valid_info.write_summary( tb_writer, "train/valid_", params.batch_idx_train, ) loss_value = tot_loss["loss"] / tot_loss["frames"] params.train_loss = loss_value if params.train_loss < params.best_train_loss: params.best_train_epoch = params.cur_epoch params.best_train_loss = params.train_loss def run(rank, world_size, args): """ Args: rank: It is a value between 0 and `world_size-1`, which is passed automatically by `mp.spawn()` in :func:`main`. The node with rank 0 is responsible for saving checkpoint. world_size: Number of GPUs for DDP training. args: The return value of get_parser().parse_args() """ params = get_params() params.update(vars(args)) fix_random_seed(42) if world_size > 1: setup_dist(rank, world_size, params.master_port) setup_logger(f"{params.exp_dir}/log/log-train") logging.info("Training started") logging.info(params) if args.tensorboard and rank == 0: tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") else: tb_writer = None lexicon = Lexicon(params.lang_dir) max_phone_id = max(lexicon.tokens) device = torch.device("cpu") if torch.cuda.is_available(): device = torch.device("cuda", rank) graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device) model = TdnnLstm( num_features=params.feature_dim, num_classes=max_phone_id + 1, # +1 for the blank symbol subsampling_factor=params.subsampling_factor, ) checkpoints = load_checkpoint_if_available(params=params, model=model) model.to(device) if world_size > 1: model = DDP(model, device_ids=[rank]) optimizer = optim.AdamW( model.parameters(), lr=params.lr, weight_decay=params.weight_decay, ) scheduler = StepLR(optimizer, step_size=8, gamma=0.1) if checkpoints: optimizer.load_state_dict(checkpoints["optimizer"]) scheduler.load_state_dict(checkpoints["scheduler"]) librispeech = LibriSpeechAsrDataModule(args) train_dl = librispeech.train_dataloaders() valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}") if tb_writer is not None: tb_writer.add_scalar( "train/lr", scheduler.get_last_lr()[0], params.batch_idx_train, ) tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) params.cur_epoch = epoch train_one_epoch( params=params, model=model, optimizer=optimizer, graph_compiler=graph_compiler, train_dl=train_dl, valid_dl=valid_dl, tb_writer=tb_writer, world_size=world_size, ) scheduler.step() save_checkpoint( params=params, model=model, optimizer=optimizer, scheduler=scheduler, rank=rank, ) logging.info("Done!") if world_size > 1: torch.distributed.barrier() 
cleanup_dist() def main(): parser = get_parser() LibriSpeechAsrDataModule.add_arguments(parser) args = parser.parse_args() world_size = args.world_size assert world_size >= 1 if world_size > 1: mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) else: run(rank=0, world_size=1, args=args) if __name__ == "__main__": main()
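A sketch of how save_checkpoint is typically called at the end of each epoch; the surrounding objects (params, model, optimizer, scheduler, rank) mirror those constructed in run above, so treat this as an illustration rather than a drop-in snippet.

for epoch in range(params.start_epoch, params.num_epochs):
    params.cur_epoch = epoch
    # ... train_one_epoch(...) and scheduler.step() run here ...

    # Only rank 0 writes epoch-{epoch}.pt; best-train-loss.pt and
    # best-valid-loss.pt are refreshed when this epoch is the best so far.
    save_checkpoint(
        params=params,
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        rank=rank,
    )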
compute_validation_loss
Run the validation process. The validation loss is saved in `params.valid_loss`.
#!/usr/bin/env python3 # Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang # Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging from pathlib import Path from shutil import copyfile from typing import Optional, Tuple import k2 import torch import torch.multiprocessing as mp import torch.nn as nn import torch.optim as optim from asr_datamodule import LibriSpeechAsrDataModule from lhotse.utils import fix_random_seed from model import TdnnLstm from torch import Tensor from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.utils import clip_grad_norm_ from torch.optim.lr_scheduler import StepLR from torch.utils.tensorboard import SummaryWriter from icefall.checkpoint import load_checkpoint from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.dist import cleanup_dist, setup_dist from icefall.graph_compiler import CtcTrainingGraphCompiler from icefall.lexicon import Lexicon from icefall.utils import ( AttributeDict, MetricsTracker, encode_supervisions, get_env_info, setup_logger, str2bool, ) def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( "--world-size", type=int, default=1, help="Number of GPUs for DDP training.", ) parser.add_argument( "--master-port", type=int, default=12354, help="Master port to use for DDP training.", ) parser.add_argument( "--tensorboard", type=str2bool, default=True, help="Should various information be logged in tensorboard.", ) parser.add_argument( "--num-epochs", type=int, default=20, help="Number of epochs to train.", ) parser.add_argument( "--start-epoch", type=int, default=0, help="""Resume training from from this epoch. If it is positive, it will load checkpoint from tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt """, ) return parser def get_params() -> AttributeDict: """Return a dict containing training parameters. All training related parameters that are not passed from the commandline is saved in the variable `params`. Commandline options are merged into `params` after they are parsed, so you can also access them via `params`. Explanation of options saved in `params`: - exp_dir: It specifies the directory where all training related files, e.g., checkpoints, log, etc, are saved - lang_dir: It contains language related input files such as "lexicon.txt" - lr: It specifies the initial learning rate - feature_dim: The model input dim. It has to match the one used in computing features. - weight_decay: The weight_decay for the optimizer. - subsampling_factor: The subsampling factor for the model. - best_train_loss: Best training loss so far. It is used to select the model that has the lowest training loss. It is updated during the training. - best_valid_loss: Best validation loss so far. It is used to select the model that has the lowest validation loss. It is updated during the training. 
- best_train_epoch: It is the epoch that has the best training loss. - best_valid_epoch: It is the epoch that has the best validation loss. - batch_idx_train: Used to writing statistics to tensorboard. It contains number of batches trained so far across epochs. - log_interval: Print training loss if batch_idx % log_interval` is 0 - reset_interval: Reset statistics if batch_idx % reset_interval is 0 - valid_interval: Run validation if batch_idx % valid_interval` is 0 - beam_size: It is used in k2.ctc_loss - reduction: It is used in k2.ctc_loss - use_double_scores: It is used in k2.ctc_loss """ params = AttributeDict( { "exp_dir": Path("tdnn_lstm_ctc/exp"), "lang_dir": Path("data/lang_phone"), "lr": 1e-3, "feature_dim": 80, "weight_decay": 5e-4, "subsampling_factor": 3, "best_train_loss": float("inf"), "best_valid_loss": float("inf"), "best_train_epoch": -1, "best_valid_epoch": -1, "batch_idx_train": 0, "log_interval": 10, "reset_interval": 200, "valid_interval": 1000, "beam_size": 10, "reduction": "sum", "use_double_scores": True, "env_info": get_env_info(), } ) return params def load_checkpoint_if_available( params: AttributeDict, model: nn.Module, optimizer: Optional[torch.optim.Optimizer] = None, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, ) -> None: """Load checkpoint from file. If params.start_epoch is positive, it will load the checkpoint from `params.start_epoch - 1`. Otherwise, this function does nothing. Apart from loading state dict for `model`, `optimizer` and `scheduler`, it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, and `best_valid_loss` in `params`. Args: params: The return value of :func:`get_params`. model: The training model. optimizer: The optimizer that we are using. scheduler: The learning rate scheduler we are using. Returns: Return None. """ if params.start_epoch <= 0: return filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" saved_params = load_checkpoint( filename, model=model, optimizer=optimizer, scheduler=scheduler, ) keys = [ "best_train_epoch", "best_valid_epoch", "batch_idx_train", "best_train_loss", "best_valid_loss", ] for k in keys: params[k] = saved_params[k] return saved_params def save_checkpoint( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler._LRScheduler, rank: int = 0, ) -> None: """Save model, optimizer, scheduler and training stats to file. Args: params: It is returned by :func:`get_params`. model: The training model. """ if rank != 0: return filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" save_checkpoint_impl( filename=filename, model=model, params=params, optimizer=optimizer, scheduler=scheduler, rank=rank, ) if params.best_train_epoch == params.cur_epoch: best_train_filename = params.exp_dir / "best-train-loss.pt" copyfile(src=filename, dst=best_train_filename) if params.best_valid_epoch == params.cur_epoch: best_valid_filename = params.exp_dir / "best-valid-loss.pt" copyfile(src=filename, dst=best_valid_filename) def compute_loss( params: AttributeDict, model: nn.Module, batch: dict, graph_compiler: CtcTrainingGraphCompiler, is_training: bool, ) -> Tuple[Tensor, MetricsTracker]: """ Compute CTC loss given the model and its inputs. Args: params: Parameters for training. See :func:`get_params`. model: The model for training. It is an instance of TdnnLstm in our case. batch: A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` for the content in it. 
graph_compiler: It is used to build a decoding graph from a ctc topo and training transcript. The training transcript is contained in the given `batch`, while the ctc topo is built when this compiler is instantiated. is_training: True for training. False for validation. When it is True, this function enables autograd during computation; when it is False, it disables autograd. """ device = graph_compiler.device feature = batch["inputs"] # at entry, feature is (N, T, C) feature = feature.permute(0, 2, 1) # now feature is (N, C, T) assert feature.ndim == 3 feature = feature.to(device) with torch.set_grad_enabled(is_training): nnet_output = model(feature) # nnet_output is (N, T, C) # NOTE: We need `encode_supervisions` to sort sequences with # different duration in decreasing order, required by # `k2.intersect_dense` called in `k2.ctc_loss` supervisions = batch["supervisions"] supervision_segments, texts = encode_supervisions( supervisions, subsampling_factor=params.subsampling_factor ) decoding_graph = graph_compiler.compile(texts) dense_fsa_vec = k2.DenseFsaVec( nnet_output, supervision_segments, allow_truncate=params.subsampling_factor - 1, ) loss = k2.ctc_loss( decoding_graph=decoding_graph, dense_fsa_vec=dense_fsa_vec, output_beam=params.beam_size, reduction=params.reduction, use_double_scores=params.use_double_scores, ) assert loss.requires_grad == is_training info = MetricsTracker() info["frames"] = supervision_segments[:, 2].sum().item() info["loss"] = loss.detach().cpu().item() return loss, info # MASKED: compute_validation_loss function (lines 338-373) def train_one_epoch( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, graph_compiler: CtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, valid_dl: torch.utils.data.DataLoader, tb_writer: Optional[SummaryWriter] = None, world_size: int = 1, ) -> None: """Train the model for one epoch. The training loss from the mean of all frames is saved in `params.train_loss`. It runs the validation process every `params.valid_interval` batches. Args: params: It is returned by :func:`get_params`. model: The model for training. optimizer: The optimizer we are using. graph_compiler: It is used to convert transcripts to FSAs. train_dl: Dataloader for the training dataset. valid_dl: Dataloader for the validation dataset. tb_writer: Writer to write log messages to tensorboard. world_size: Number of nodes in DDP training. If it is 1, DDP is disabled. """ model.train() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=True, ) # summary stats. 
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info optimizer.zero_grad() loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() if batch_idx % params.log_interval == 0: logging.info( f"Epoch {params.cur_epoch}, " f"batch {batch_idx}, loss[{loss_info}], " f"tot_loss[{tot_loss}], batch size: {batch_size}" ) if batch_idx % params.log_interval == 0: if tb_writer is not None: loss_info.write_summary( tb_writer, "train/current_", params.batch_idx_train ) tot_loss.write_summary( tb_writer, "train/tot_", params.batch_idx_train ) if batch_idx > 0 and batch_idx % params.valid_interval == 0: valid_info = compute_validation_loss( params=params, model=model, graph_compiler=graph_compiler, valid_dl=valid_dl, world_size=world_size, ) model.train() logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}") if tb_writer is not None: valid_info.write_summary( tb_writer, "train/valid_", params.batch_idx_train, ) loss_value = tot_loss["loss"] / tot_loss["frames"] params.train_loss = loss_value if params.train_loss < params.best_train_loss: params.best_train_epoch = params.cur_epoch params.best_train_loss = params.train_loss def run(rank, world_size, args): """ Args: rank: It is a value between 0 and `world_size-1`, which is passed automatically by `mp.spawn()` in :func:`main`. The node with rank 0 is responsible for saving checkpoint. world_size: Number of GPUs for DDP training. args: The return value of get_parser().parse_args() """ params = get_params() params.update(vars(args)) fix_random_seed(42) if world_size > 1: setup_dist(rank, world_size, params.master_port) setup_logger(f"{params.exp_dir}/log/log-train") logging.info("Training started") logging.info(params) if args.tensorboard and rank == 0: tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") else: tb_writer = None lexicon = Lexicon(params.lang_dir) max_phone_id = max(lexicon.tokens) device = torch.device("cpu") if torch.cuda.is_available(): device = torch.device("cuda", rank) graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device) model = TdnnLstm( num_features=params.feature_dim, num_classes=max_phone_id + 1, # +1 for the blank symbol subsampling_factor=params.subsampling_factor, ) checkpoints = load_checkpoint_if_available(params=params, model=model) model.to(device) if world_size > 1: model = DDP(model, device_ids=[rank]) optimizer = optim.AdamW( model.parameters(), lr=params.lr, weight_decay=params.weight_decay, ) scheduler = StepLR(optimizer, step_size=8, gamma=0.1) if checkpoints: optimizer.load_state_dict(checkpoints["optimizer"]) scheduler.load_state_dict(checkpoints["scheduler"]) librispeech = LibriSpeechAsrDataModule(args) train_dl = librispeech.train_dataloaders() valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}") if tb_writer is not None: tb_writer.add_scalar( "train/lr", scheduler.get_last_lr()[0], params.batch_idx_train, ) tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) params.cur_epoch = epoch train_one_epoch( params=params, model=model, optimizer=optimizer, graph_compiler=graph_compiler, train_dl=train_dl, valid_dl=valid_dl, tb_writer=tb_writer, world_size=world_size, ) scheduler.step() save_checkpoint( params=params, model=model, optimizer=optimizer, scheduler=scheduler, rank=rank, ) logging.info("Done!") if world_size > 1: torch.distributed.barrier() 
cleanup_dist() def main(): parser = get_parser() LibriSpeechAsrDataModule.add_arguments(parser) args = parser.parse_args() world_size = args.world_size assert world_size >= 1 if world_size > 1: mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) else: run(rank=0, world_size=1, args=args) if __name__ == "__main__": main()
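A note on the launch pattern visible in main() above: torch.multiprocessing.spawn injects the process index as the first positional argument of the target function, which is why run(rank, world_size, args) takes rank before the values passed via args=(world_size, args). A minimal, self-contained sketch of that mechanism (the worker below is hypothetical and only prints its rank):

import torch.multiprocessing as mp


def _worker(rank, world_size, message):
    # `rank` is supplied by mp.spawn itself, taking values 0 .. world_size - 1;
    # `world_size` and `message` come from the args=(...) tuple.
    print(f"rank {rank}/{world_size}: {message}")


if __name__ == "__main__":
    mp.spawn(_worker, args=(2, "hello"), nprocs=2, join=True)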
def compute_validation_loss(
    params: AttributeDict,
    model: nn.Module,
    graph_compiler: CtcTrainingGraphCompiler,
    valid_dl: torch.utils.data.DataLoader,
    world_size: int = 1,
) -> MetricsTracker:
    """Run the validation process.

    Accumulates the validation loss over `valid_dl`, updates
    `params.best_valid_loss` and `params.best_valid_epoch` when the
    per-frame loss improves, and returns the accumulated statistics.
    """
    model.eval()

    tot_loss = MetricsTracker()

    for batch_idx, batch in enumerate(valid_dl):
        loss, loss_info = compute_loss(
            params=params,
            model=model,
            batch=batch,
            graph_compiler=graph_compiler,
            is_training=False,
        )
        assert loss.requires_grad is False
        tot_loss = tot_loss + loss_info

    if world_size > 1:
        tot_loss.reduce(loss.device)

    loss_value = tot_loss["loss"] / tot_loss["frames"]

    if loss_value < params.best_valid_loss:
        params.best_valid_epoch = params.cur_epoch
        params.best_valid_loss = loss_value

    return tot_loss
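The call tot_loss.reduce(loss.device) above is what makes the validation statistics global under DDP. A minimal sketch of the same idea, assuming the tracker behaves like a dict of scalar counters and that the default process group has already been initialized (as setup_dist() is expected to do in run()); this is an illustration, not icefall's MetricsTracker:

import torch
import torch.distributed as dist


def reduce_metrics(metrics, device):
    # Sum each counter over all ranks so that every process ends up with the
    # global totals; loss / frames then averages over the whole validation set.
    keys = sorted(metrics)
    buf = torch.tensor([float(metrics[k]) for k in keys],
                       dtype=torch.float64, device=device)
    dist.all_reduce(buf, op=dist.ReduceOp.SUM)
    return {k: v.item() for k, v in zip(keys, buf)}

For example, a rank-local {'frames': 1200, 'loss': 345.6} becomes the sum over all ranks, so the best-valid bookkeeping is done on the reduced totals rather than per-rank values.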
338
373
#!/usr/bin/env python3 # Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang # Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging from pathlib import Path from shutil import copyfile from typing import Optional, Tuple import k2 import torch import torch.multiprocessing as mp import torch.nn as nn import torch.optim as optim from asr_datamodule import LibriSpeechAsrDataModule from lhotse.utils import fix_random_seed from model import TdnnLstm from torch import Tensor from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.utils import clip_grad_norm_ from torch.optim.lr_scheduler import StepLR from torch.utils.tensorboard import SummaryWriter from icefall.checkpoint import load_checkpoint from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.dist import cleanup_dist, setup_dist from icefall.graph_compiler import CtcTrainingGraphCompiler from icefall.lexicon import Lexicon from icefall.utils import ( AttributeDict, MetricsTracker, encode_supervisions, get_env_info, setup_logger, str2bool, ) def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( "--world-size", type=int, default=1, help="Number of GPUs for DDP training.", ) parser.add_argument( "--master-port", type=int, default=12354, help="Master port to use for DDP training.", ) parser.add_argument( "--tensorboard", type=str2bool, default=True, help="Should various information be logged in tensorboard.", ) parser.add_argument( "--num-epochs", type=int, default=20, help="Number of epochs to train.", ) parser.add_argument( "--start-epoch", type=int, default=0, help="""Resume training from from this epoch. If it is positive, it will load checkpoint from tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt """, ) return parser def get_params() -> AttributeDict: """Return a dict containing training parameters. All training related parameters that are not passed from the commandline is saved in the variable `params`. Commandline options are merged into `params` after they are parsed, so you can also access them via `params`. Explanation of options saved in `params`: - exp_dir: It specifies the directory where all training related files, e.g., checkpoints, log, etc, are saved - lang_dir: It contains language related input files such as "lexicon.txt" - lr: It specifies the initial learning rate - feature_dim: The model input dim. It has to match the one used in computing features. - weight_decay: The weight_decay for the optimizer. - subsampling_factor: The subsampling factor for the model. - best_train_loss: Best training loss so far. It is used to select the model that has the lowest training loss. It is updated during the training. - best_valid_loss: Best validation loss so far. It is used to select the model that has the lowest validation loss. It is updated during the training. 
- best_train_epoch: It is the epoch that has the best training loss. - best_valid_epoch: It is the epoch that has the best validation loss. - batch_idx_train: Used to writing statistics to tensorboard. It contains number of batches trained so far across epochs. - log_interval: Print training loss if batch_idx % log_interval` is 0 - reset_interval: Reset statistics if batch_idx % reset_interval is 0 - valid_interval: Run validation if batch_idx % valid_interval` is 0 - beam_size: It is used in k2.ctc_loss - reduction: It is used in k2.ctc_loss - use_double_scores: It is used in k2.ctc_loss """ params = AttributeDict( { "exp_dir": Path("tdnn_lstm_ctc/exp"), "lang_dir": Path("data/lang_phone"), "lr": 1e-3, "feature_dim": 80, "weight_decay": 5e-4, "subsampling_factor": 3, "best_train_loss": float("inf"), "best_valid_loss": float("inf"), "best_train_epoch": -1, "best_valid_epoch": -1, "batch_idx_train": 0, "log_interval": 10, "reset_interval": 200, "valid_interval": 1000, "beam_size": 10, "reduction": "sum", "use_double_scores": True, "env_info": get_env_info(), } ) return params def load_checkpoint_if_available( params: AttributeDict, model: nn.Module, optimizer: Optional[torch.optim.Optimizer] = None, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, ) -> None: """Load checkpoint from file. If params.start_epoch is positive, it will load the checkpoint from `params.start_epoch - 1`. Otherwise, this function does nothing. Apart from loading state dict for `model`, `optimizer` and `scheduler`, it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, and `best_valid_loss` in `params`. Args: params: The return value of :func:`get_params`. model: The training model. optimizer: The optimizer that we are using. scheduler: The learning rate scheduler we are using. Returns: Return None. """ if params.start_epoch <= 0: return filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" saved_params = load_checkpoint( filename, model=model, optimizer=optimizer, scheduler=scheduler, ) keys = [ "best_train_epoch", "best_valid_epoch", "batch_idx_train", "best_train_loss", "best_valid_loss", ] for k in keys: params[k] = saved_params[k] return saved_params def save_checkpoint( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler._LRScheduler, rank: int = 0, ) -> None: """Save model, optimizer, scheduler and training stats to file. Args: params: It is returned by :func:`get_params`. model: The training model. """ if rank != 0: return filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" save_checkpoint_impl( filename=filename, model=model, params=params, optimizer=optimizer, scheduler=scheduler, rank=rank, ) if params.best_train_epoch == params.cur_epoch: best_train_filename = params.exp_dir / "best-train-loss.pt" copyfile(src=filename, dst=best_train_filename) if params.best_valid_epoch == params.cur_epoch: best_valid_filename = params.exp_dir / "best-valid-loss.pt" copyfile(src=filename, dst=best_valid_filename) def compute_loss( params: AttributeDict, model: nn.Module, batch: dict, graph_compiler: CtcTrainingGraphCompiler, is_training: bool, ) -> Tuple[Tensor, MetricsTracker]: """ Compute CTC loss given the model and its inputs. Args: params: Parameters for training. See :func:`get_params`. model: The model for training. It is an instance of TdnnLstm in our case. batch: A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` for the content in it. 
graph_compiler: It is used to build a decoding graph from a ctc topo and training transcript. The training transcript is contained in the given `batch`, while the ctc topo is built when this compiler is instantiated. is_training: True for training. False for validation. When it is True, this function enables autograd during computation; when it is False, it disables autograd. """ device = graph_compiler.device feature = batch["inputs"] # at entry, feature is (N, T, C) feature = feature.permute(0, 2, 1) # now feature is (N, C, T) assert feature.ndim == 3 feature = feature.to(device) with torch.set_grad_enabled(is_training): nnet_output = model(feature) # nnet_output is (N, T, C) # NOTE: We need `encode_supervisions` to sort sequences with # different duration in decreasing order, required by # `k2.intersect_dense` called in `k2.ctc_loss` supervisions = batch["supervisions"] supervision_segments, texts = encode_supervisions( supervisions, subsampling_factor=params.subsampling_factor ) decoding_graph = graph_compiler.compile(texts) dense_fsa_vec = k2.DenseFsaVec( nnet_output, supervision_segments, allow_truncate=params.subsampling_factor - 1, ) loss = k2.ctc_loss( decoding_graph=decoding_graph, dense_fsa_vec=dense_fsa_vec, output_beam=params.beam_size, reduction=params.reduction, use_double_scores=params.use_double_scores, ) assert loss.requires_grad == is_training info = MetricsTracker() info["frames"] = supervision_segments[:, 2].sum().item() info["loss"] = loss.detach().cpu().item() return loss, info def compute_validation_loss( params: AttributeDict, model: nn.Module, graph_compiler: CtcTrainingGraphCompiler, valid_dl: torch.utils.data.DataLoader, world_size: int = 1, ) -> MetricsTracker: """Run the validation process. The validation loss is saved in `params.valid_loss`. """ model.eval() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(valid_dl): loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=False, ) assert loss.requires_grad is False tot_loss = tot_loss + loss_info if world_size > 1: tot_loss.reduce(loss.device) loss_value = tot_loss["loss"] / tot_loss["frames"] if loss_value < params.best_valid_loss: params.best_valid_epoch = params.cur_epoch params.best_valid_loss = loss_value return tot_loss def train_one_epoch( params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, graph_compiler: CtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, valid_dl: torch.utils.data.DataLoader, tb_writer: Optional[SummaryWriter] = None, world_size: int = 1, ) -> None: """Train the model for one epoch. The training loss from the mean of all frames is saved in `params.train_loss`. It runs the validation process every `params.valid_interval` batches. Args: params: It is returned by :func:`get_params`. model: The model for training. optimizer: The optimizer we are using. graph_compiler: It is used to convert transcripts to FSAs. train_dl: Dataloader for the training dataset. valid_dl: Dataloader for the validation dataset. tb_writer: Writer to write log messages to tensorboard. world_size: Number of nodes in DDP training. If it is 1, DDP is disabled. """ model.train() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) loss, loss_info = compute_loss( params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=True, ) # summary stats. 
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info optimizer.zero_grad() loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() if batch_idx % params.log_interval == 0: logging.info( f"Epoch {params.cur_epoch}, " f"batch {batch_idx}, loss[{loss_info}], " f"tot_loss[{tot_loss}], batch size: {batch_size}" ) if batch_idx % params.log_interval == 0: if tb_writer is not None: loss_info.write_summary( tb_writer, "train/current_", params.batch_idx_train ) tot_loss.write_summary( tb_writer, "train/tot_", params.batch_idx_train ) if batch_idx > 0 and batch_idx % params.valid_interval == 0: valid_info = compute_validation_loss( params=params, model=model, graph_compiler=graph_compiler, valid_dl=valid_dl, world_size=world_size, ) model.train() logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}") if tb_writer is not None: valid_info.write_summary( tb_writer, "train/valid_", params.batch_idx_train, ) loss_value = tot_loss["loss"] / tot_loss["frames"] params.train_loss = loss_value if params.train_loss < params.best_train_loss: params.best_train_epoch = params.cur_epoch params.best_train_loss = params.train_loss def run(rank, world_size, args): """ Args: rank: It is a value between 0 and `world_size-1`, which is passed automatically by `mp.spawn()` in :func:`main`. The node with rank 0 is responsible for saving checkpoint. world_size: Number of GPUs for DDP training. args: The return value of get_parser().parse_args() """ params = get_params() params.update(vars(args)) fix_random_seed(42) if world_size > 1: setup_dist(rank, world_size, params.master_port) setup_logger(f"{params.exp_dir}/log/log-train") logging.info("Training started") logging.info(params) if args.tensorboard and rank == 0: tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") else: tb_writer = None lexicon = Lexicon(params.lang_dir) max_phone_id = max(lexicon.tokens) device = torch.device("cpu") if torch.cuda.is_available(): device = torch.device("cuda", rank) graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device) model = TdnnLstm( num_features=params.feature_dim, num_classes=max_phone_id + 1, # +1 for the blank symbol subsampling_factor=params.subsampling_factor, ) checkpoints = load_checkpoint_if_available(params=params, model=model) model.to(device) if world_size > 1: model = DDP(model, device_ids=[rank]) optimizer = optim.AdamW( model.parameters(), lr=params.lr, weight_decay=params.weight_decay, ) scheduler = StepLR(optimizer, step_size=8, gamma=0.1) if checkpoints: optimizer.load_state_dict(checkpoints["optimizer"]) scheduler.load_state_dict(checkpoints["scheduler"]) librispeech = LibriSpeechAsrDataModule(args) train_dl = librispeech.train_dataloaders() valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}") if tb_writer is not None: tb_writer.add_scalar( "train/lr", scheduler.get_last_lr()[0], params.batch_idx_train, ) tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) params.cur_epoch = epoch train_one_epoch( params=params, model=model, optimizer=optimizer, graph_compiler=graph_compiler, train_dl=train_dl, valid_dl=valid_dl, tb_writer=tb_writer, world_size=world_size, ) scheduler.step() save_checkpoint( params=params, model=model, optimizer=optimizer, scheduler=scheduler, rank=rank, ) logging.info("Done!") if world_size > 1: torch.distributed.barrier() 
cleanup_dist() def main(): parser = get_parser() LibriSpeechAsrDataModule.add_arguments(parser) args = parser.parse_args() world_size = args.world_size assert world_size >= 1 if world_size > 1: mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) else: run(rank=0, world_size=1, args=args) if __name__ == "__main__": main()
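In train_one_epoch above, the running statistic is updated as tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info, i.e. an exponentially decayed sum whose effective window is roughly reset_interval batches. A small numeric sketch of that arithmetic, assuming the tracker scales and adds its counters element-wise (plain dicts stand in for MetricsTracker, and the per-batch numbers are hypothetical):

reset_interval = 200
decay = 1 - 1 / reset_interval  # 0.995

tot = {"frames": 0.0, "loss": 0.0}
for _ in range(400):
    batch_stats = {"frames": 1000.0, "loss": 300.0}  # hypothetical per-batch numbers
    tot = {k: tot[k] * decay + batch_stats[k] for k in tot}

# tot["loss"] / tot["frames"] -> 0.3, the recent per-frame loss; batches seen
# 400 steps ago have been down-weighted by decay**400 and barely contribute.
print(tot["loss"] / tot["frames"])

This is why the logged tot_loss tracks recent training behaviour instead of accumulating over the whole epoch.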
__exists_replicas
Internal method to check if a replica exists at a given site.

:param rse_id: The RSE id.
:param scope: The scope of the file.
:param name: The name of the file.
:param path: The path of the replica.
:param session: The database session in use.
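The implementation of __exists_replicas is masked in this sample, but its call shape can be read off the call sites further down in __declare_bad_file_replicas and add_bad_dids: callers unpack a five-element tuple (exists, scope, name, already_declared, bytes). A hedged, runnable illustration of that call shape, with a stub standing in for the real (masked) implementation:

def _fake_exists_replicas(rse_id, scope, name, path=None, session=None):
    # Stub with the same return shape the callers below unpack:
    # (exists, scope, name, already_declared, bytes). Values are made up.
    return True, scope, name, False, 1024


replica_exists, scope, name, already_declared, size = _fake_exists_replicas(
    "RSE_MOCK", "mock_scope", "mock_file", path=None, session=None)
if replica_exists and not already_declared:
    # This mirrors how the declaring functions decide to add a BadReplicas row.
    print(f"would declare {scope}:{name} ({size} bytes) as BAD")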
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result # MASKED: __exists_replicas function (lines 142-176) @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. 
""" result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. 
Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. 
""" unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = 
session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. """ unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
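                        # append the PFN to the already-sorted list of this RSE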
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
    :param account: The account owner.
    :param session: The database session in use.

    :returns: The combined list of newly added and already existing files.
    """
    condition = []
    for f in files:
        condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE))

    q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition))
    available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q]
    new_files = list()
    for file in files:
        found = False
        for available_file in available_files:
            if file['scope'] == available_file['scope'] and file['name'] == available_file['name']:
                found = True
                break
        if not found:
            new_files.append(file)
    __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session)
    return new_files + available_files


def tombstone_from_delay(tombstone_delay):
    """
    Translate a tombstone delay into an absolute tombstone timestamp: empty or
    invalid delays yield no tombstone, negative delays yield an epoch (immediately
    deletable) tombstone, positive delays are added to the current UTC time.
    """
    # Tolerate None for tombstone_delay
    if not tombstone_delay:
        return None

    if not isinstance(tombstone_delay, timedelta):
        try:
            tombstone_delay = timedelta(seconds=int(tombstone_delay))
        except ValueError:
            return None

    if not tombstone_delay:
        return None

    if tombstone_delay < timedelta(0):
        return datetime(1970, 1, 1)

    return datetime.utcnow() + tombstone_delay


@transactional_session
def __bulk_add_replicas(rse_id, files, account, session=None):
    """
    Bulk add new file replicas.

    :param rse_id: the RSE id.
    :param files: the list of files.
    :param account: The account owner.
    :param session: The database session in use.

    :returns: Tuple of (number of new files, sum of their bytes).
    """
    nbfiles, bytes = 0, 0
    # Check for the replicas already available
    condition = []
    for f in files:
        condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id))

    query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\
        with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\
        filter(or_(*condition))
    available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query]

    default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None)
    default_tombstone = tombstone_from_delay(default_tombstone_delay)

    new_replicas = []
    for file in files:
        found = False
        for available_replica in available_replicas:
            if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']:
                found = True
                break
        if not found:
            nbfiles += 1
            bytes += file['bytes']
            new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'],
                                 'bytes': file['bytes'], 'path': file.get('path'),
                                 'state': ReplicaState(file.get('state', 'A')),
                                 'md5': file.get('md5'), 'adler32': file.get('adler32'),
                                 'lock_cnt': file.get('lock_cnt', 0),
                                 'tombstone': file.get('tombstone') or default_tombstone})
    try:
        new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas)
        session.flush()
        return nbfiles, bytes
    except IntegrityError as error:
        if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \
                or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \
                or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
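                    # non-Oracle backends: always schedule the DID metadata (DidMeta) cleanup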
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
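    # Candidate selection: replicas at this RSE whose tombstone has expired and which hold no locks;
    # rows already in BEING_DELETED are only reconsidered after `delay_seconds`, replicas still
    # registered as transfer sources are skipped, and the selected rows are locked with
    # FOR UPDATE SKIP LOCKED so concurrent daemons do not pick the same candidates.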
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
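    Transitions to BEING_DELETED are only applied to replicas without locks that are not registered
    as transfer sources; in that case the tombstone is also set to the epoch (OBSOLETE) value.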
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
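
    Illustrative call (argument values are placeholders, not taken from this module)::

        touch_replica({'rse_id': rse_id, 'scope': scope, 'name': name,
                       'accessed_at': datetime.utcnow()})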
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
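# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): post-processing the
# {rse_id: bytes} mapping returned by get_RSEcoverage_of_dataset above into
# per-RSE coverage fractions. The helper name and the `total_bytes` argument
# are assumptions added for illustration only.
# ---------------------------------------------------------------------------
def _coverage_fractions(coverage, total_bytes):
    """Return {rse_id: fraction_of_dataset_bytes}; 0.0 everywhere if total_bytes is falsy."""
    if not total_bytes:
        return {rse_id: 0.0 for rse_id in coverage}
    return {rse_id: nbytes / float(total_bytes) for rse_id, nbytes in coverage.items()}

# Example usage (hypothetical): combine with the query above inside a session:
#   fractions = _coverage_fractions(get_RSEcoverage_of_dataset(scope, name, session=session), dataset_bytes)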
@read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. """ already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None
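# ---------------------------------------------------------------------------
# Illustrative note (assumption, not from the original file): callers such as
# __declare_bad_file_replicas and add_bad_dids further below unpack the
# 5-tuple returned by __exists_replicas like this:
#
#   exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session)
#   if exists and not already_declared:
#       ...  # safe to declare the replica bad; `size` carries the file bytes
# ---------------------------------------------------------------------------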
142
176
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
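
    The filename is hashed (first eight bytes of its sha256 digest, mapped onto [0, 1)) and
    matched against the per-site server ranges fetched from the Virtual Placement endpoint;
    an empty string is returned whenever no prefix can be determined.

    Example (illustrative only; the site name and returned host are made up):
        >>> get_multi_cache_prefix('SITE_A', 'file_1')
        'xcache01.example.org:1094'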
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
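    # The query below pre-selects deletion candidates on this RSE: replicas whose tombstone has
    # expired and which hold no locks, plus replicas stuck in BEING_DELETED for longer than
    # delay_seconds. Replicas currently registered as transfer sources are excluded, and the
    # selected rows are locked (skip_locked) so that concurrent reapers do not pick the same
    # files; the rows returned are flagged BEING_DELETED further down.
    #
    # Usage sketch (illustrative; the limit, byte budget and rse_id are hypothetical):
    #     >>> candidates = list_and_mark_unlocked_replicas(limit=100, bytes=10**12, rse_id=rse_id)
    #     >>> [(r['scope'], r['name'], r['bytes']) for r in candidates]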
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
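
    Each replica dictionary is expected to carry at least 'rse_id', 'scope', 'name' and 'state'
    (a ReplicaState member or its string value); an optional 'path' is applied as well.

    Example (illustrative; rse_id and scope stand in for an existing replica's identifiers):
        >>> update_replicas_states([{'rse_id': rse_id, 'scope': scope, 'name': 'file_1',
        ...                          'state': ReplicaState.AVAILABLE}], nowait=True)
        True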
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
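
    Example (illustrative; rse_id and scope stand in for an existing replica's identifiers):
        >>> touch_replica({'rse_id': rse_id, 'scope': scope, 'name': 'file_1',
        ...                'accessed_at': datetime.utcnow()})
        True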
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
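Example (illustrative sketch; the dictionary would typically be one of those returned by get_cleaned_updated_collection_replicas(), and the values shown are hypothetical):

    >>> update_collection_replica({'id': request_id, 'scope': scope, 'name': 'dataset_1', 'rse_id': rse_id})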
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
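Example (illustrative sketch; the PFN is a hypothetical placeholder):

    >>> bulk_delete_bad_pfns(pfns=['srm://se.example.org/rucio/file_1'])
    True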
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}, ...]
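Example (illustrative sketch; the RSE expression and keyword values are hypothetical):

    >>> get_suspicious_files('MOCK', nattempts=2, younger_than=datetime.utcnow() - timedelta(days=3), is_suspicious=True)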
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
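A minimal usage sketch of the replica-state helpers defined above. It assumes this module is importable as rucio.core.replica, a configured Rucio database (so the session decorators can open their own sessions), and caller-supplied scope, name and rse_id values; the two wrapper functions are hypothetical illustrations, not part of the module.

from rucio.core.replica import touch_replica, update_replicas_states
from rucio.db.sqla.constants import ReplicaState


def finish_transfer(scope, name, rse_id, succeeded):
    # AVAILABLE makes update_replicas_states() invoke
    # rucio.core.lock.successful_transfer(); UNAVAILABLE invokes
    # rucio.core.lock.failed_transfer(), as in the function body above.
    state = ReplicaState.AVAILABLE if succeeded else ReplicaState.UNAVAILABLE
    update_replicas_states([{'scope': scope, 'name': name,
                             'rse_id': rse_id, 'state': state}])


def report_access(scope, name, rse_id):
    # Best-effort accessed_at/tombstone refresh; returns False when the
    # replica row is locked by another transaction.
    return touch_replica({'scope': scope, 'name': name, 'rse_id': rse_id})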
list_bad_replicas_history
List the bad file replicas history. Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use.
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
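:returns: A tuple (replica_exists, scope, name, already_declared, bytes).

Example (illustrative sketch; the rse_id, scope and name values are hypothetical):

    >>> exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=scope, name='file_1')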
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result # MASKED: list_bad_replicas_history function (lines 225-246) @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. 
Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. 
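:returns: The list of PFNs that could not be declared, each suffixed with a short reason.

Example (illustrative sketch; the PFN, rse_id and issuer values are hypothetical):

    >>> __declare_bad_file_replicas(['srm://se.example.org/rucio/file_1'], rse_id, 'lost on disk', issuer_account)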
""" unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = 
session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX)", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas for the given DIDs. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: The state to declare; BadFilesStatus.BAD by default. :param session: The database session in use. """ unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSEs associated with a list of PFNs. :param pfns: The list of PFNs. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple: scheme, {rse1: [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}.
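Example (illustrative sketch; the PFN and hostname are hypothetical, and whether the PFN maps to an RSE or ends up under 'unknown' depends on the protocols registered in the database):
>>> scheme, per_rse, unknown = get_pfn_to_rse(['srm://se.example.org:8443/rucio/data/file1.root'])
>>> scheme
'srm'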
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids are found later, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the first N files from the generator. After that, each following element may be picked to substitute # one of the previously selected elements with a probability which decreases as the number of encountered elements grows.
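# (Only nrandom elements are ever kept in memory, so the input generator can be arbitrarily long; every element of the stream ends up in the returned sample with roughly equal probability.)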
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
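The cache node is selected deterministically: the filename is hashed with sha256, mapped into the unit interval, and matched against the per-site server ranges published by the Virtual Placement endpoint.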
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True if successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
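    # Descriptive note (added comment): the selection query below gathers deletion
    # candidates on this RSE -- replicas whose tombstone has expired and which hold
    # no locks (lock_cnt == 0) -- while skipping anything still registered as a
    # transfer source. Replicas already in BEING_DELETED are only reconsidered once
    # they are older than `delay_seconds`, and SELECT ... FOR UPDATE SKIP LOCKED
    # keeps concurrent daemons from picking the same rows.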
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
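    Usage sketch (illustrative only; rse_id, scope and name are placeholder values):

        touched = touch_replica({'rse_id': rse_id,
                                 'scope': scope,
                                 'name': name,
                                 'accessed_at': datetime.utcnow()})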
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
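        # Descriptive note (added comment): the raw statement below de-duplicates
        # updated_col_rep on Oracle by keeping, for each (scope, name, did_type,
        # rse_id) group -- NULL rse_ids compared as equal -- only the row with the
        # lowest ROWID and deleting every other row of the group.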
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
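    Usage sketch (illustrative; the dictionary mirrors one row of the updated_col_rep
    table and every value is a placeholder):

        update_collection_replica({'id': request_id,
                                   'scope': scope,
                                   'name': dataset_name,
                                   'rse_id': rse_id})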
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
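    Usage sketch (illustrative; the PFN strings below are placeholders):

        bulk_delete_bad_pfns(pfns=['srm://storage.example.org/path/file_1',
                                   'srm://storage.example.org/path/file_2'])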
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
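    Usage sketch (illustrative; the RSE expression, thresholds and session are
    placeholders):

        suspicious = get_suspicious_files('MOCK',
                                          nattempts=2,
                                          younger_than=datetime.now() - timedelta(days=3),
                                          exclude_states=['B', 'R', 'D'],
                                          available_elsewhere=True,
                                          session=session)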
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
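# Usage sketch for get_RSEcoverage_of_dataset (illustrative comment; scope and
# dataset name are placeholders):
#
#     coverage = get_RSEcoverage_of_dataset(scope=scope, name='dataset_name')
#     for rse_id, nbytes in coverage.items():
#         print(rse_id, nbytes)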
@read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas
225
246
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
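    # Select replicas on the given RSE whose tombstone has expired, which hold no locks
    # and are not registered as a transfer source, and which are either in state
    # AVAILABLE, UNAVAILABLE or BAD, or have been stuck in BEING_DELETED for longer than
    # delay_seconds. Rows are ordered by tombstone and locked with
    # "FOR UPDATE SKIP LOCKED" so that concurrent workers do not pick the same replicas.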
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
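Each replica dictionary must contain 'rse_id', 'scope', 'name' and 'state' (a ReplicaState
or its string value) and may contain 'path'. A transition to BEING_DELETED only succeeds
for unlocked replicas that are not used as a transfer source and also sets an OBSOLETE
tombstone; AVAILABLE and UNAVAILABLE transitions are propagated to rucio.core.lock via
successful_transfer() and failed_transfer() respectively.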
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
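Example (illustrative only; scope, name and rse_id are placeholders):

    >>> touch_replica({'scope': scope, 'name': name, 'rse_id': rse_id,
    ...                'accessed_at': datetime.utcnow()}, session=session)
    True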
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
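:param session: The database session in use.

If the update request carries an rse_id, only the matching collection replica is
recomputed (length, bytes, available bytes, available replica count and state) and it is
deleted when its available replica count drops to zero; otherwise all collection replicas
of the DID are recomputed. The processed update request is deleted at the end.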
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
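The set_tombstone function just above combines a guarded bulk UPDATE (the correlated ~exists() lock check) with a zero-rowcount follow-up query to tell a locked replica apart from a missing one. The following is a minimal, self-contained sketch of that pattern, not Rucio code: ToyReplica and ToyLock are made-up stand-ins for the real models, and a SQLAlchemy 1.3/1.4-style ORM is assumed, as in this file.

# Sketch of the "guarded UPDATE + rowcount diagnosis" pattern used by set_tombstone above.
# ToyReplica/ToyLock are illustrative toy tables, not Rucio models.
from datetime import datetime

from sqlalchemy import Column, DateTime, String, and_, create_engine, exists
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class ToyReplica(Base):
    __tablename__ = 'toy_replicas'
    name = Column(String, primary_key=True)
    tombstone = Column(DateTime, nullable=True)


class ToyLock(Base):
    __tablename__ = 'toy_locks'
    name = Column(String, primary_key=True)


def toy_set_tombstone(session, name, tombstone):
    # UPDATE only rows that have no matching lock row; the correlated
    # ~exists() clause plays the role of the ReplicaLock check above.
    rowcount = session.query(ToyReplica).filter(
        and_(ToyReplica.name == name,
             ~exists().where(ToyLock.name == ToyReplica.name))
    ).update({ToyReplica.tombstone: tombstone}, synchronize_session=False)
    if rowcount == 0:
        # Nothing was updated: distinguish "locked" from "not found", as set_tombstone does.
        if session.query(ToyReplica).filter_by(name=name).count():
            raise RuntimeError('replica %s is locked' % name)
        raise RuntimeError('replica %s not found' % name)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([ToyReplica(name='file_a'),
                 ToyReplica(name='file_b'), ToyLock(name='file_b')])
session.commit()

toy_set_tombstone(session, 'file_a', datetime.utcnow())    # succeeds, tombstone set
# toy_set_tombstone(session, 'file_b', datetime.utcnow())  # would raise: replica is locked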
declare_bad_file_replicas
Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use.
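In the source file below, the body of this function is replaced by the marker "# MASKED: declare_bad_file_replicas function (lines 465-481)". Based only on the docstring above and the helpers that remain visible in the file (get_pfn_to_rse and __declare_bad_file_replicas, both defined below), a plausible shape of the function is sketched here; this is an inferred illustration, not the actual masked implementation, and it assumes the issuer account exposes a vo attribute as Rucio internal account types do.

@transactional_session
def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None):
    # Inferred sketch: group the PFNs by RSE, then declare them per RSE via the
    # private helper; anything the helper could not match is reported back.
    scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session)
    for rse_id in files_to_declare:
        notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer,
                                                  status=status, scheme=scheme, session=session)
        if notdeclared:
            unknown_replicas[rse_id] = notdeclared
    return unknown_replicas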
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas # MASKED: declare_bad_file_replicas function (lines 465-481) @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. """ unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, 
session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). \ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. 
:returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). # If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. 
files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. 
""" if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
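# rse_pfns is sorted by (rse_id, priority), so appending here keeps each per-RSE PFN list
# ordered from the most preferred to the least preferred protocol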
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for non-deterministic RSEs). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True if successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)).
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
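# The query below selects deletion candidates on this RSE: replicas whose tombstone is already in the past,
# with no locks, not registered as a transfer source, and either in a deletable state (AVAILABLE, UNAVAILABLE, BAD)
# or stuck in BEING_DELETED for more than delay_seconds. Rows are locked with FOR UPDATE SKIP LOCKED so that
# concurrent workers do not pick the same replicas, and are returned oldest tombstone first.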
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
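Each replica dictionary must provide at least 'rse_id', 'scope', 'name' and 'state'; the state can be given as a ReplicaState value or as its one-letter string code. A minimal sketch (the identifiers below are hypothetical):

>>> update_replicas_states(replicas=[{'rse_id': rse_id, 'scope': scope, 'name': 'file_1', 'state': 'A'}])
True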
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
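Example (a minimal sketch; the rse_id, scope and file name below are hypothetical):

>>> touch_replica({'rse_id': rse_id, 'scope': scope, 'name': 'file_1', 'accessed_at': datetime.utcnow()})
True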
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
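Example (a minimal sketch; the dataset DID below is hypothetical):

>>> files, replicas = get_and_lock_file_replicas_for_dataset(scope=scope, name='dataset_1', nowait=True)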
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
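Example (a minimal sketch; the dataset DID below is hypothetical):

>>> for replica in list_dataset_replicas(scope=scope, name='dataset_1'):
...     print(replica['rse'], replica['available_length'], replica['state'])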
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
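        # Descriptive note (added): the Oracle-specific DELETE below de-duplicates update
        # requests by rowid -- for every (scope, name, did_type, rse_id) group, only the row
        # with the smallest rowid survives and all later duplicates are removed.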
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
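# --- Illustrative sketch (not part of the original module) ---------------------------
# A minimal, hypothetical example of how the {rse_id: <bytes at rse_id>} mapping returned
# by get_RSEcoverage_of_dataset() above could be turned into per-RSE completion fractions.
# `total_bytes` is assumed to be obtained elsewhere (e.g. from the dataset's DID metadata);
# the helper name below is illustrative only and does not exist in Rucio.
def coverage_fractions(coverage, total_bytes):
    """Return {rse_id: fraction of the dataset's bytes available at that RSE}."""
    if not total_bytes:
        return {}
    return {rse_id: float(nbytes) / total_bytes for rse_id, nbytes in coverage.items()}
# --------------------------------------------------------------------------------------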
@transactional_session
def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None):
    """
    Declare a list of bad replicas.

    :param pfns: The list of PFNs.
    :param reason: The reason of the loss.
    :param issuer: The issuer account.
    :param status: The status of the file (SUSPICIOUS or BAD).
    :param session: The database session in use.
    """
    scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session)
    for rse_id in files_to_declare:
        notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer,
                                                  status=status, scheme=scheme, session=session)
        if notdeclared:
            unknown_replicas[rse_id] = notdeclared

    return unknown_replicas
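# --- Illustrative sketch (not part of the original module) ---------------------------
# A hedged example of how a caller might use the declare_bad_file_replicas() implementation
# above: declare a batch of PFNs as BAD and log every entry that could not be matched to a
# known replica. `issuer` is assumed to be an InternalAccount; the helper name is hypothetical.
def report_bad_pfns(pfns, issuer, reason, session=None):
    unknown = declare_bad_file_replicas(pfns, reason=reason, issuer=issuer,
                                        status=BadFilesStatus.BAD, session=session)
    for rse_id, entries in unknown.items():
        # each entry is a "<pfn> <explanation>" string produced by the core function
        for entry in entries:
            logging.warning('Could not declare bad replica on %s: %s', rse_id, entry)
    return unknown
# --------------------------------------------------------------------------------------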
465
481
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
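
    The cache node is chosen by hashing the filename (a sketch of the scheme implemented
    below): the first 8 bytes of sha256(filename) are mapped to a float in [0, 1) and the
    result is matched against the hash ranges the VP server publishes for the cache site,
    roughly

        h = unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0] / 2**64
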
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
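                            # no scheme was requested by the client: look up the RSE's preferred read
                            # scheme; in 'all' mode this is done for the wan door first, then for the lan door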
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
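                        # rse_pfns is already sorted by (rse_id, priority), so appending here
                        # keeps the per-RSE PFN lists in priority order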
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
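                # non-Oracle backends can always store DID metadata, so the DID_META cleanup
                # is scheduled unconditionally (on Oracle this requires version >= 12, see above)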
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
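    # The query below walks the tombstone index: it picks unlocked replicas whose tombstone has
    # expired (or which have been stuck in BEING_DELETED for more than delay_seconds), skips
    # replicas still referenced as transfer sources, and locks the selected rows with SKIP LOCKED
    # so that concurrent workers do not return the same candidates. The
    # `case([(tombstone != None, rse_id)]) == rse_id` construct appears to be there to steer the
    # database towards the tombstone-based index rather than a plain rse_id equality filter.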
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
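
    Usage sketch (illustrative only; assumes an existing replica identified by rse_id,
    scope and name, and relies on the session injected by the decorator):

        >>> touch_replica({'rse_id': rse_id, 'scope': scope, 'name': name,
        ...                'accessed_at': datetime.utcnow()})
        True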
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
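    Illustrative sketch (the dictionary mirrors a row of the updated collection replica table; the values are assumed):

        request = {'id': request_id, 'scope': scope, 'name': 'dataset.name', 'rse_id': rse_id}
        update_collection_replica(request)

    A request whose 'rse_id' is None re-evaluates all replicas of the collection instead of a single one.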
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
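    Usage sketch (the PFNs are illustrative):

        bulk_delete_bad_pfns(['srm://se.example.org/path/file1', 'srm://se.example.org/path/file2'])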
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
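    Illustrative call (the RSE expression and thresholds are examples, not defaults beyond those documented above):

        suspicious = get_suspicious_files('MOCK', nattempts=2,
                                          younger_than=datetime.utcnow() - timedelta(days=3),
                                          available_elsewhere=True)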
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
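# Hedged usage sketch (not part of the original module; scope, name, dataset_name and rse_id
# are assumed to exist in the calling context):
#
#     set_tombstone(rse_id=rse_id, scope=scope, name=name)
#     coverage = get_RSEcoverage_of_dataset(scope=scope, name=dataset_name)  # {rse_id: total bytes}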
list_bad_replicas
List RSE file replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionaries {'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': rse}.
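    Usage sketch (the thread partitioning values are illustrative):

        for bad in list_bad_replicas(limit=100, thread=0, total_threads=4):
            print(bad['scope'], bad['name'], bad['rse'])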
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
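    :returns: A tuple (exists, scope, name, already_declared, bytes); if no matching replica is found, (False, None, None, False, None) is returned.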
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
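    Example (illustrative sketch only; the DIDs, `some_rse_id` and `root_account` below are made-up placeholders):

        >>> dids = [{'scope': 'mock', 'name': 'file_1'}, {'scope': 'mock', 'name': 'file_2'}]
        >>> unknown = add_bad_dids(dids, rse_id=some_rse_id, reason='lost on disk', issuer=root_account)
        >>> # DIDs that could not be declared are returned in `unknown` as 'scope:name reason' strings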
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas # MASKED: list_bad_replicas function (lines 540-578) @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. 
:returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). # If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. 
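    # Illustration of the de-duplication idiom used just below (pure Python, assuming only that the
    # requested dids are dicts of hashable values): each did dict is round-tripped through a hashable
    # tuple of its items so duplicates collapse in the set, e.g.
    #     >>> dids = [{'scope': 's', 'name': 'f1'}, {'scope': 's', 'name': 'f1'}]
    #     >>> [dict(t) for t in set(tuple(item.items()) for item in dids)]
    #     [{'scope': 's', 'name': 'f1'}]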
files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. 
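    Each yielded row is a plain tuple laid out to match the other replica sources merged in
    _list_replicas (a sketch; the field names are taken from how _list_replicas unpacks the merged
    rows, the two None slots corresponding to archive_scope/archive_name):

        >>> # (scope, name, archive_scope, archive_name, bytes, md5, adler32,
        >>> #  path, state, rse_id, rse, rse_type, volatile)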
""" if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
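    The payload fetched from the VP server's serverRanges endpoint is assumed (from the lookup logic
    below) to look roughly like {site: {'ranges': [[server_index, upper_bound], ...], 'servers':
    [[address, ...], ...]}}; the filename is hashed to a float in [0, 1) and the first range whose
    upper bound exceeds it picks the server. A runnable sketch of that hashing step, with a made-up
    filename:

        >>> from hashlib import sha256
        >>> from struct import unpack
        >>> h = float(unpack('Q', sha256('file_1'.encode('utf-8')).digest()[:8])[0]) / 2**64
        >>> 0.0 <= h < 1.0
        True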
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
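    Example (illustrative sketch; the scope, name and site are made-up, and only a few of the
    returned fields are shown):

        >>> dids = [{'scope': InternalScope('mock', vo='def'), 'name': 'file_1'}]
        >>> for replica in list_replicas(dids, schemes=['root'], client_location={'site': 'SOME_SITE'}):
        ...     print(replica['scope'], replica['name'], sorted(replica['pfns']))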
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for a non-deterministic RSE). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True if successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
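    # The query below selects deletion candidates on the requested RSE: replicas
    # whose tombstone has expired, which hold no locks, which are not referenced
    # as a transfer source, and which are either in a deletable state
    # (AVAILABLE, UNAVAILABLE, BAD) or have been stuck in BEING_DELETED for
    # longer than delay_seconds. Rows are taken with FOR UPDATE SKIP LOCKED so
    # that concurrent reapers do not pick the same candidates, and are ordered
    # by tombstone so the oldest candidates are served first.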
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
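        # The raw statement below de-duplicates update requests on Oracle: for
        # every (scope, name, did_type, rse_id) combination only the row with
        # the smallest ROWID survives, since any row whose ROWID is greater
        # than that of a matching row is deleted. NULL rse_id values are
        # treated as equal for the comparison.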
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
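# Illustrative sketch: one way a caller might use get_RSEcoverage_of_dataset()
# defined above. The helper name and its return convention are hypothetical and
# not part of the original module; it only relies on the coverage dictionary
# ({rse_id: total bytes}) documented in that function's docstring.
def _example_rse_with_most_dataset_bytes(scope, name):
    """Return the rse_id holding the largest share of a dataset, or None."""
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name)
    if not coverage:
        return None
    # rank the rse_ids by the byte count stored as the dictionary value
    return max(coverage, key=coverage.get)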
@read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List bad file replicas (replicas in state BAD) whose DID is not LOST. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionaries {'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text(...)) is needed, because otherwise SQLAlchemy uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). \ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows
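# Illustrative sketch: how a multi-threaded daemon could partition the
# bad-replica backlog with the thread/total_threads arguments of
# list_bad_replicas() defined above. The helper name and the default values
# are hypothetical example choices, not part of the original module.
def _example_collect_bad_replicas(total_threads=4, limit=1000):
    """Gather the bad replicas of every worker partition, keyed by thread id."""
    backlog = {}
    for thread_id in range(total_threads):
        # each call only returns replicas whose hashed name maps to thread_id
        backlog[thread_id] = list_bad_replicas(limit=limit, thread=thread_id, total_threads=total_threads)
    return backlog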
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
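# Added note: the fallback below treats any unexpected DIDAvailability value as DELETED.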
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
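# Added illustrative note (not from the original source): once the first nrandom items fill `selected`,
# the randint draw below keeps the element counted as the i-th one seen with probability nrandom / (i + 1);
# e.g. with nrandom=10, the 100th element seen replaces a random earlier pick with probability 10/101.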
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN: got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True if successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable ' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
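# The query below picks deletion candidates on the given RSE: replicas whose tombstone is already in the past,
# that carry no locks (lock_cnt == 0), that are not referenced as a transfer source, and that are either in the
# AVAILABLE/UNAVAILABLE/BAD states or stuck in BEING_DELETED for more than delay_seconds. Rows are ordered by
# tombstone and locked with FOR UPDATE SKIP LOCKED so that concurrent workers do not select the same replicas;
# the only_delete_obsolete flag is enforced in the loop further down by stopping at the first non-EPOCH tombstone.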
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
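Example (illustrative only; the scope, name and rse_id values are placeholders):
    touch_replica({'scope': scope, 'name': 'file_1', 'rse_id': rse_id, 'accessed_at': datetime.utcnow()}, session=session)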
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update requests for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum number of update requests to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
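# Oracle-specific de-duplication: the raw statement below compares rowids so that only the row with the
# smallest rowid survives per (scope, name, did_type, rse_id) group, with NULL rse_id values treated as equal.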
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the updated_col_rep table. 
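:param session: The database session in use.
The update_request dict is expected to provide at least the 'id', 'scope', 'name' and 'rse_id' keys (as returned by get_cleaned_updated_collection_replicas); rse_id may be None, in which case the collection replicas on all RSEs are recomputed.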
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. :returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query = query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True if successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of bad PFNs to delete. :param session: The database session in use. :returns: True if successful. 
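Example (illustrative only; the PFN is a placeholder):
    bulk_delete_bad_pfns(pfns=['srm://se.example.org/rucio/path/file.root'], session=session)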
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
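# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): a minimal,
# hypothetical helper showing how the two functions above could be combined.
# `rse_id`, `scope`/`name` (a file DID) and `dataset_scope`/`dataset_name`
# are placeholders that must refer to existing entities in a Rucio instance.
def _example_tombstone_and_coverage(rse_id, scope, name, dataset_scope, dataset_name):
    # Mark one replica as eligible for deletion; set_tombstone raises
    # ReplicaIsLocked / ReplicaNotFound if the replica is locked or missing.
    set_tombstone(rse_id=rse_id, scope=scope, name=name, tombstone=OBSOLETE)
    # Summarise, per RSE, how many bytes of the dataset are currently available.
    return get_RSEcoverage_of_dataset(scope=dataset_scope, name=dataset_name)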
__bulk_add_file_dids
Bulk add new dids. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True if successful.
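A minimal sketch of the payload such a bulk-add helper works on, inferred from the bulk-add code later in this module; the exact signature of `__bulk_add_file_dids` is masked here, so the commented call shape and all values are assumptions, not part of the original source.

files = [
    # one dictionary per file DID; 'md5', 'adler32', 'account' and 'meta' are optional
    {'scope': 'user.jdoe', 'name': 'file_001.root', 'bytes': 1048576,
     'adler32': '0cc737eb', 'meta': {'events': 100}},
]
# __bulk_add_file_dids(files, account, session=session)  # hypothetical call shape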
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? 
failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True # MASKED: __bulk_add_file_dids function (lines 1370-1402) def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
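    # The query below picks deletion candidates on the given RSE:
    #   - the tombstone has expired (tombstone < utcnow) and the replica holds no locks (lock_cnt == 0),
    #   - the state is AVAILABLE, UNAVAILABLE or BAD, or the replica has been stuck in
    #     BEING_DELETED for longer than delay_seconds,
    #   - the replica is not registered as a transfer source in the Sources table.
    # Candidates are ordered by tombstone and locked with FOR UPDATE SKIP LOCKED so that
    # concurrent workers do not select the same rows.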
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
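
    Depending on the requested state, extra handling is applied (see the code below):
    BEING_DELETED is only set if the replica holds no locks and is not used as a transfer
    source, and it also sets an OBSOLETE tombstone; AVAILABLE and UNAVAILABLE propagate the
    transfer result to the locks/rules via rucio.core.lock; TEMPORARY_UNAVAILABLE is only
    applied to replicas that are currently AVAILABLE or TEMPORARY_UNAVAILABLE. If a replica
    cannot be updated, UnsupportedOperation is raised.
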
    :param replicas: The list of replicas.
    :param nowait: Nowait parameter for the for_update queries.
    :param session: The database session in use.
    """
    for replica in replicas:
        query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
        try:
            if nowait:
                query.with_for_update(nowait=True).one()
        except NoResultFound:
            # remember scope, name and rse
            raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session)))

        if isinstance(replica['state'], string_types):
            replica['state'] = ReplicaState(replica['state'])

        values = {'state': replica['state']}
        if replica['state'] == ReplicaState.BEING_DELETED:
            query = query.filter_by(lock_cnt=0)
            # Exclude replicas used as sources
            stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope,
                                          models.RSEFileAssociation.name == models.Source.name,
                                          models.RSEFileAssociation.rse_id == models.Source.rse_id))
            query = query.filter(not_(stmt))
            values['tombstone'] = OBSOLETE
        elif replica['state'] == ReplicaState.AVAILABLE:
            rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.UNAVAILABLE:
            rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'],
                                            error_message=replica.get('error_message', None),
                                            broken_rule_id=replica.get('broken_rule_id', None),
                                            broken_message=replica.get('broken_message', None),
                                            nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE:
            query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE))

        if 'path' in replica and replica['path']:
            values['path'] = replica['path']

        if not query.update(values, synchronize_session=False):
            if 'rse' not in replica:
                replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
            raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica)
    return True


@transactional_session
def touch_replica(replica, session=None):
    """
    Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.

    :param replica: a dictionary with the information of the affected replica.
    :param session: The database session in use.

    :returns: True, if successful, False otherwise.
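
    A minimal usage sketch (the scope, name and rse_id values are illustrative placeholders,
    not taken from this module):

        replica = {'scope': scope, 'name': name, 'rse_id': rse_id,
                   'accessed_at': datetime.utcnow()}
        touched = touch_replica(replica)  # returns False if the row could not be locked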
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
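# --- Hedged illustration, not part of the original module -------------------
# get_RSEcoverage_of_dataset() above returns a mapping {rse_id: bytes of the
# dataset present at that RSE}. A minimal sketch of turning that mapping into
# per-RSE coverage fractions; the helper name `coverage_fractions` and the
# sample values are made up for illustration only.
def coverage_fractions(rse_bytes, dataset_bytes):
    """Return {rse_id: fraction of the dataset's bytes available at the RSE}."""
    if not dataset_bytes:
        return {rse_id: 0.0 for rse_id in rse_bytes}
    return {rse_id: total / float(dataset_bytes) for rse_id, total in rse_bytes.items()}

# Example (illustrative values only):
# coverage_fractions({'rse_a': 750, 'rse_b': 1000}, dataset_bytes=1000)
# -> {'rse_a': 0.75, 'rse_b': 1.0}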
@transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files
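# --- Hedged illustration, not part of the original module -------------------
# __bulk_add_file_dids() above separates the requested files into "already
# registered" and "new" ones with a nested loop over `available_files`. The
# same split can be sketched with a set of (scope, name) keys; the function
# name `split_known_and_new` is hypothetical and used only for this example.
def split_known_and_new(files, available_files):
    known = {(f['scope'], f['name']) for f in available_files}
    new_files = [f for f in files if (f['scope'], f['name']) not in known]
    return new_files, available_files

# Example (plain dicts stand in for the real DID structures):
# split_known_and_new([{'scope': 's', 'name': 'a'}, {'scope': 's', 'name': 'b'}],
#                     [{'scope': 's', 'name': 'a'}])
# -> ([{'scope': 's', 'name': 'b'}], [{'scope': 's', 'name': 'a'}])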
1370
1402
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
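# Added note (hedged, not in the original): with the draw below,
# random.randint(0, i) has i + 1 equally likely outcomes, so the element seen
# at position i replaces a reservoir slot with probability nrandom / (i + 1),
# e.g. ~2/101 for the 100th element when nrandom=2. A textbook reservoir
# sampler would draw over i outcomes (probability nrandom / i); either way the
# chance of substitution shrinks as more elements are consumed.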
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
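    The mapping of server ranges per cache site is fetched from the Virtual Placement (VP) endpoint
    ('<vp_endpoint>/serverRanges') and memoised in the region cache under 'CacheSites'. The cache node
    itself is chosen by hashing the filename (first 8 bytes of its sha256 digest, mapped to [0, 1))
    and looking the value up in the ranges published for the given cache site.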
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
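                            # in 'all' mode both the wan and the lan read schemes are collected for this RSE;
                            # for any other domain only the scheme selected for that domain is used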
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns)))

    nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
    increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
    return replicas


@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
    """
    Add File replica.

    :param rse_id: the rse id.
    :param scope: the scope name.
    :param name: The data identifier name.
    :param bytes: the size of the file.
    :param account: The account owner.
    :param md5: The md5 checksum.
    :param adler32: The adler32 checksum.
    :param pfn: Physical file name (for non-deterministic RSEs).
    :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
    :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
    :param tombstone: If True, create replica with a tombstone.
    :param session: The database session in use.

    :returns: True if successful.
    """
    if meta is None:
        meta = {}

    file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
    if pfn:
        file['pfn'] = pfn
    return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)


@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
    """
    Delete file replicas.

    :param rse_id: the rse id.
    :param files: the list of files to delete.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param session: The database session in use.
    """
    replica_rse = get_rse(rse_id=rse_id, session=session)

    if not (replica_rse.availability & 1) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse)

    replica_condition, src_condition = [], []
    for file in files:
        replica_condition.append(
            and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name']))

        src_condition.append(
            and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id))

    delta, bytes, rowcount = 0, 0, 0

    # WARNING : This should not be necessary since that would mean the replica is used as a source.
    for chunk in chunks(src_condition, 10):
        rowcount = session.query(models.Source). \
            filter(or_(*chunk)). \
            delete(synchronize_session=False)

    rowcount = 0
    for chunk in chunks(replica_condition, 10):
        for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name,
                                                               models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \
                with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)):
            bytes += replica_bytes
            delta += 1

        rowcount += session.query(models.RSEFileAssociation). \
            filter(models.RSEFileAssociation.rse_id == rse_id). \
            filter(or_(*chunk)).
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
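                    # also schedule cleanup of any generic did metadata (DidMeta) attached to the erased dataset;
                    # the accumulated conditions are consumed by the "Remove DID Metadata" bulk delete further below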
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
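    # Candidate selection implemented by the query below: replicas on this RSE whose tombstone has
    # already expired and which hold no locks, are not registered as a transfer source, and are either
    # in AVAILABLE/UNAVAILABLE/BAD state or have been stuck in BEING_DELETED for more than delay_seconds.
    # Rows are taken with FOR UPDATE SKIP LOCKED and ordered by tombstone, so concurrent daemons do not
    # pick the same replicas.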
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
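    A transition to BEING_DELETED is only applied to replicas without locks which are not registered
    as a transfer source (and also sets an OBSOLETE tombstone); a transition to TEMPORARY_UNAVAILABLE is
    only applied on top of AVAILABLE or TEMPORARY_UNAVAILABLE replicas. If no row matches the update,
    an UnsupportedOperation error is raised.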
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
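    The row is taken with a non-blocking FOR UPDATE (nowait): if it is locked by another session
    (or another database error occurs) the call returns False, while a missing replica is treated
    as a no-op and returns True.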
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
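The bulk helpers above (bulk_delete_bad_pfns, bulk_delete_bad_replicas, list_dataset_replicas_bulk) all rely on the same clause-chunking pattern: build one SQLAlchemy filter clause per item and run the query in bounded batches. A minimal, self-contained sketch of that pattern follows; the chunks helper shown here merely stands in for the one imported from rucio.common.utils, and the model/session arguments are placeholders for illustration.

from sqlalchemy import or_


def chunks(items, size):
    # Yield successive slices of at most `size` elements.
    for i in range(0, len(items), size):
        yield items[i:i + size]


def bulk_delete_by_path(paths, model, session):
    # One equality clause per path; delete in batches of 100 so the OR list stays bounded.
    path_clause = [model.path == path for path in paths]
    for chunk in chunks(path_clause, 100):
        session.query(model).filter(or_(*chunk)).delete(synchronize_session=False)
    return True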
add_replica
Add a file replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file in bytes. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for non-deterministic RSEs). :param meta: Metadata associated with the file, represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create the replica with a tombstone. :param session: The database session in use. :returns: True if successful.
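A hypothetical call assembled purely from the parameters documented above; the import path, scope, checksum and RSE/account/session values are illustrative assumptions, not taken from the implementation.

from rucio.common.types import InternalScope
from rucio.core.replica import add_replica  # assumed module path

added = add_replica(rse_id=some_rse_id,                 # placeholder RSE id
                    scope=InternalScope('user.jdoe'),   # assumed scope
                    name='file.0001.root',              # assumed file name
                    bytes=10485760,                     # file size in bytes
                    account=some_account,               # placeholder owning account
                    adler32='0cc737eb',                 # assumed adler32 checksum
                    md5=None,
                    pfn=None,                           # only needed on non-deterministic RSEs
                    meta={'events': 1000},              # optional key/value metadata
                    rules=[{'copies': 2, 'rse_expression': 'TIERS1'}],
                    tombstone=False,
                    session=some_session)               # placeholder database session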
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
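    :returns: The list of replicas that could not be declared, each entry carrying the reason ('Already declared' or 'Unknown replica').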
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
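    # Concretely: the i-th element seen (i > nrandom) overwrites a uniformly chosen slot with probability
    # roughly nrandom / i, so every element of the stream ends up (approximately) equally likely to be returned.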
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
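    The choice is made with a simple consistent hash: the SHA-256 of the filename is mapped into [0, 1)
    and compared against the server ranges advertised by the Virtual Placement (VP) endpoint.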
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
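                        # 'all' mode: advertise both doors by resolving the preferred read scheme
                        # on the wan and on the lan separately (see the two appends below).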
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas # MASKED: add_replica function (lines 1572-1598) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). \ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. 
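    Called recursively for the constituents of deleted archives, so that archive contents receive the same cleanup.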
""" parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ 
filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( 
and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). 
\ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). \ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. 
:param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests 
REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. :param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. 
:param session: The database session in use. :returns: True, if successful, False otherwise. """ try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
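# --------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the Rucio module):
# one hedged example of how the per-RSE byte coverage returned by
# get_RSEcoverage_of_dataset() above might be consumed. The scope/name values
# are hypothetical, and a configured database session is assumed to be
# injected by the @read_session decorator.
def print_dataset_coverage(scope, name):
    """Print the total bytes of a dataset available on each RSE."""
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name)
    for rse_id, total_bytes in coverage.items():
        # get_rse_name() is imported at module level in this file
        print('%s: %.2f GB' % (get_rse_name(rse_id=rse_id), total_bytes / 1e9))
# --------------------------------------------------------------------------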
@transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)
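# --------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the Rucio module):
# a minimal, hedged example of registering a single file replica through the
# add_replica() implementation above. The RSE id, scope, file name, checksum
# and account below are hypothetical placeholders; a real call requires an
# existing RSE and a configured database backend.
from rucio.common.types import InternalAccount, InternalScope


def register_example_replica(rse_id):
    """Register a single 10 MiB test file replica on the given (assumed) RSE."""
    return add_replica(rse_id=rse_id,                        # assumed existing RSE id
                       scope=InternalScope('user.jdoe'),     # hypothetical scope
                       name='file.example.0001',             # hypothetical LFN
                       bytes=10 * 1024 * 1024,
                       adler32='0cc737eb',                   # example checksum
                       account=InternalAccount('root'))      # hypothetical account
# add_replica() simply wraps add_replicas() with a one-element file list, so
# the return value is whatever add_replicas() reports for that list.
# --------------------------------------------------------------------------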
1572
1598
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
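    :returns: A tuple (exists, scope, name, already_declared, size); scope, name and size are None when no replica is found.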
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
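    :returns: A list of strings describing the DIDs that could not be declared, together with the reason ('Already declared' or 'Unknown replica').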
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
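    # Note: random.randint(0, i) below is inclusive on both ends, so the
    # substitution index is drawn from the i + 1 values 0..i, and the current
    # element replaces one of the previously selected ones only when the draw
    # falls below nrandom (e.g. with nrandom=3, the element seen at i=4
    # replaces a selected one with probability 3/5 at that step).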
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
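                            # in 'all' mode both doors are probed: the preferred wan read
                            # scheme and the preferred lan read scheme are both collected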
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns)))

    nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
    increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
    return replicas


@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
    """
    Add File replica.

    :param rse_id: the rse id.
    :param scope: the scope name.
    :param name: The data identifier name.
    :param bytes: the size of the file.
    :param account: The account owner.
    :param md5: The md5 checksum.
    :param adler32: The adler32 checksum.
    :param pfn: Physical file name (for nondeterministic rse).
    :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
    :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
    :param tombstone: If True, create replica with a tombstone.
    :param session: The database session in use.
    :returns: True if successful.
    """
    if meta is None:
        meta = {}

    file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
    if pfn:
        file['pfn'] = pfn
    return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)


@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
    """
    Delete file replicas.

    :param rse_id: the rse id.
    :param files: the list of files to delete.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param session: The database session in use.
    """
    replica_rse = get_rse(rse_id=rse_id, session=session)

    if not (replica_rse.availability & 1) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse)

    replica_condition, src_condition = [], []
    for file in files:
        replica_condition.append(
            and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name']))

        src_condition.append(
            and_(models.Source.scope == file['scope'],
                 models.Source.name == file['name'],
                 models.Source.rse_id == rse_id))

    delta, bytes, rowcount = 0, 0, 0

    # WARNING : This should not be necessary since that would mean the replica is used as a source.
    for chunk in chunks(src_condition, 10):
        rowcount = session.query(models.Source). \
            filter(or_(*chunk)). \
            delete(synchronize_session=False)

    rowcount = 0
    for chunk in chunks(replica_condition, 10):
        for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name,
                                                               models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \
                with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)):
            bytes += replica_bytes
            delta += 1

        rowcount += session.query(models.RSEFileAssociation). \
            filter(models.RSEFileAssociation.rse_id == rse_id). \
            filter(or_(*chunk)).
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
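# --- Illustrative aside (not part of the original module) ---------------------------
# The deletion paths above never build a single huge OR over every condition: they slice
# the condition lists with chunks(...) so each DELETE stays bounded (10, 30 or 100 terms).
# A minimal stand-in for that helper, named _chunks_demo to avoid clashing with the real
# import used by this module.
def _chunks_demo(items, size):
    """Yield successive slices of at most `size` elements (illustration only)."""
    for start in range(0, len(items), size):
        yield items[start:start + size]


# Example: 25 replica conditions would be issued as three statements of 10, 10 and 5 terms.
assert [len(c) for c in _chunks_demo(list(range(25)), 10)] == [10, 10, 5]
# -------------------------------------------------------------------------------------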
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
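# --- Illustrative aside (not part of the original module) ---------------------------
# A reaper-style daemon combines list_and_mark_unlocked_replicas() with delete_replicas():
# candidates are atomically flipped to BEING_DELETED above, removed from storage, and then
# dropped from the catalogue. A hedged sketch, assuming an existing rse_id and a configured
# database; sessions are injected by the decorators and physical deletion is elided.
def _reaper_pass_demo(rse_id, max_files=100):
    candidates = list_and_mark_unlocked_replicas(limit=max_files, rse_id=rse_id,
                                                 only_delete_obsolete=False)
    # ... physical deletion on the storage endpoint would happen here ...
    if candidates:
        delete_replicas(rse_id=rse_id,
                        files=[{'scope': r['scope'], 'name': r['name']} for r in candidates])
    return len(candidates)
# -------------------------------------------------------------------------------------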
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
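# --- Illustrative aside (not part of the original module) ---------------------------
# update_replicas_states() above is what transfer and consistency daemons call to flip the
# catalogue state of a replica. A hedged usage sketch: rse_id, scope and name are
# placeholders (scope is an InternalScope at this layer) and the session is injected by
# @transactional_session. Plain value strings such as 'A' are also accepted for the state
# and coerced to ReplicaState by the function itself.
def _mark_transfer_done_demo(rse_id, scope, name):
    return update_replicas_states(replicas=[{'rse_id': rse_id,
                                             'scope': scope,
                                             'name': name,
                                             'state': ReplicaState.AVAILABLE}])
# -------------------------------------------------------------------------------------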
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
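# --- Illustrative aside (not part of the original module) ---------------------------
# Rule evaluation consumes the (files, replicas) pair returned by
# get_and_lock_file_replicas_for_dataset() above: one entry per child DID, plus the locked
# replica rows per child. A hedged sketch, assuming an existing dataset DID and an
# implicit session.
def _count_files_without_replica_demo(scope, name):
    files, replicas = get_and_lock_file_replicas_for_dataset(scope=scope, name=name, nowait=False)
    # Children with no replica row are the ones a rule would still have to transfer.
    return sum(1 for f in files if not replicas.get((f['scope'], f['name'])))
# -------------------------------------------------------------------------------------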
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
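# --- Illustrative aside (not part of the original module) ---------------------------
# touch_collection_replicas() above is the dataset-level counterpart of touch_replica().
# A hedged usage sketch with placeholder identifiers and an implicit session.
def _record_dataset_access_demo(rse_id, scope, name):
    return touch_collection_replicas([{'rse_id': rse_id,
                                       'scope': scope,
                                       'name': name,
                                       'accessed_at': datetime.utcnow()}])
# -------------------------------------------------------------------------------------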
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
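# --- Illustrative aside (not part of the original module) ---------------------------
# list_dataset_replicas() above yields one dict per RSE holding (part of) the dataset; the
# deep flag trades the cheap collection_replicas lookup for a file-level aggregation that
# also resolves archives. A hedged sketch of summarising completeness per RSE, assuming an
# existing dataset DID and an implicit session.
def _dataset_completeness_demo(scope, name):
    summary = {}
    for rep in list_dataset_replicas(scope=scope, name=name, deep=True):
        summary[rep['rse']] = (rep['available_length'], rep['length'])
    return summary
# -------------------------------------------------------------------------------------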
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
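# --- Illustrative aside (not part of the original module) ---------------------------
# list_dataset_replicas_bulk() above expects the names grouped by internal scope, which
# lets it build one IN-style clause per scope. A hedged sketch of the expected input shape;
# scope_a and scope_b stand for InternalScope objects and the dataset names are placeholders.
def _bulk_dataset_lookup_demo(scope_a, scope_b):
    names_by_intscope = {
        scope_a: ['data.0001.dataset', 'data.0002.dataset'],
        scope_b: ['mc.0001.dataset'],
    }
    return list(list_dataset_replicas_bulk(names_by_intscope=names_by_intscope))
# -------------------------------------------------------------------------------------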
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
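# --- Illustrative aside (not part of the original module) ---------------------------
# The generic (non-Oracle, non-MySQL) branch above keeps the first update request per
# (scope, name, rse_id) and collects the ids of the rest for deletion. A minimal
# pure-Python sketch of that deduplication rule; the dicts stand in for
# UpdatedCollectionReplica rows.
def _duplicate_update_request_ids_demo(update_requests):
    seen, duplicate_ids = set(), []
    for req in update_requests:
        key = (req['scope'], req['name'], req.get('rse_id'))
        if key in seen:
            duplicate_ids.append(req['id'])
        else:
            seen.add(key)
    return duplicate_ids


# The second request for the same collection replica is flagged as a duplicate.
assert _duplicate_update_request_ids_demo([
    {'id': 1, 'scope': 's', 'name': 'ds', 'rse_id': 'r'},
    {'id': 2, 'scope': 's', 'name': 'ds', 'rse_id': 'r'},
]) == [2]
# -------------------------------------------------------------------------------------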
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
get_source_replicas
Get source replicas for a specific scope:name. :param scope: The scope of the DID. :param name: The name of the DID. :param source_rses: Possible RSE ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects
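A minimal sketch of an implementation consistent with this docstring, following the conventions of the surrounding module (the read_session decorator, models.RSEFileAssociation, ReplicaState.AVAILABLE). The source_rses handling and the returned ORM objects here are illustrative assumptions, not the project's actual implementation.

@read_session
def get_source_replicas(scope, name, source_rses=None, session=None):
    # Sketch only: query available file replicas for the given scope:name.
    query = session.query(models.RSEFileAssociation)\
        .filter_by(scope=scope, name=name)\
        .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
    # Optionally restrict to the candidate source RSEs (assumed to be a list of rse_ids).
    if source_rses:
        query = query.filter(models.RSEFileAssociation.rse_id.in_(source_rses))
    return query.all()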
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
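                    # Illustrative sketch (hypothetical, not part of this module): the
                    # re-prioritisation above builds one total order across domains by exploiting
                    # the alphabetical order 'lan' < 'wan' < 'zip', then rewrites priorities as
                    # 1-indexed ranks for metalink compatibility. With made-up PFNs:
                    #
                    #     pfns_meta = {'root://a.example/f': {'domain': 'wan', 'priority': 1},
                    #                  'root://b.example/f': {'domain': 'lan', 'priority': 2},
                    #                  'root://c.example/f': {'domain': 'zip', 'priority': 1}}
                    #     ordered = sorted((m['domain'], m['priority'], pfn) for pfn, m in pfns_meta.items())
                    #     for rank, (_, _, pfn) in enumerate(ordered, start=1):
                    #         pfns_meta[pfn]['priority'] = rank
                    #     # lan comes first (rank 1), then wan (rank 2), then zip (rank 3)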
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
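            # Illustrative sketch (hypothetical, not part of this module): the cleanup in this
            # function repeatedly turns long lists of per-DID conditions into bounded OR-filters
            # via the chunks() helper, so that no single DELETE statement grows unbounded. The
            # same pattern with a stand-in helper and a made-up dids_to_remove list:
            #
            #     def chunked(items, size):
            #         for i in range(0, len(items), size):
            #             yield items[i:i + size]
            #
            #     conditions = [and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)
            #                   for scope, name in dids_to_remove]
            #     for chunk in chunked(conditions, 100):
            #         session.query(models.DataIdentifier).\
            #             filter(or_(*chunk)).\
            #             delete(synchronize_session=False)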
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
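    # Illustrative sketch (hypothetical, not part of this module): the query built below selects
    # expired-tombstone, lock-free replicas with with_for_update(skip_locked=True), so concurrent
    # reaper daemons never claim the same rows; claimed rows are then flipped to BEING_DELETED
    # with an epoch tombstone. A simplified version of that claim step (the real query below also
    # filters on RSE, replica state and source usage):
    #
    #     candidates = session.query(models.RSEFileAssociation).\
    #         filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
    #         filter(models.RSEFileAssociation.lock_cnt == 0).\
    #         order_by(models.RSEFileAssociation.tombstone).\
    #         with_for_update(skip_locked=True).\
    #         limit(100).\
    #         all()
    #     for replica in candidates:
    #         replica.state = ReplicaState.BEING_DELETED
    #         replica.tombstone = datetime(1970, 1, 1)  # epoch tombstone marks the replica OBSOLETE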
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() # MASKED: get_source_replicas function (lines 2223-2243) @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, 
models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
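# --- Illustrative sketch (not part of the original module) --------------------
# A minimal example of how a caller might turn the {rse_id: bytes} mapping
# returned by get_RSEcoverage_of_dataset() into per-RSE coverage fractions.
# The helper name and its total_bytes argument are hypothetical; they only
# illustrate the shape of the returned dictionary.
def _dataset_coverage_fractions_sketch(scope, name, total_bytes, session=None):
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name, session=session)
    if not total_bytes:
        return {}
    # 1.0 means the RSE holds all of the dataset's bytes
    return {rse_id: float(rse_bytes) / total_bytes for rse_id, rse_bytes in coverage.items()}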
@transactional_session
def get_source_replicas(scope, name, source_rses=None, session=None):
    """
    Get source replicas for a specific scope:name.

    :param scope: The scope of the did.
    :param name: The name of the did.
    :param source_rses: Possible RSE_ids to filter on.
    :param session: The db session in use.
    :returns: List of rse_ids holding an available replica of the file.
    """
    query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
    if source_rses:
        if len(source_rses) < 10:
            rse_clause = []
            for rse_id in source_rses:
                rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
            if rse_clause:
                query = query.filter(or_(*rse_clause))
    return [a[0] for a in query.all()]
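# --- Illustrative usage sketch (not part of the original module) --------------
# get_source_replicas() only applies the source_rses filter when fewer than 10
# candidate RSEs are passed; with 10 or more, every AVAILABLE replica is
# returned. The hypothetical wrapper below re-applies the restriction on the
# client side so the result is the same regardless of the candidate count.
def _available_sources_sketch(scope, name, candidate_rse_ids, session=None):
    rse_ids = get_source_replicas(scope=scope, name=name,
                                  source_rses=candidate_rse_ids, session=session)
    candidates = set(candidate_rse_ids)
    return [rse_id for rse_id in rse_ids if rse_id in candidates]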
2,223
2,243
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
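    The cache node is chosen deterministically: the filename is hashed with sha256, the first 8 bytes
    of the digest are mapped to a float in [0, 1), and the first entry of the cache site's 'ranges'
    list whose upper bound exceeds that value selects the server. A hypothetical 'serverRanges'
    payload, with its structure inferred from the code below rather than from any official schema,
    could look like:

        {'SITE_A': {'servers': [['node1.example.org'], ['node2.example.org']],
                    'ranges': [[0, 0.5], [1, 1.0]]}}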
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
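    # The query below selects deletion candidates on the given RSE: replicas whose tombstone has
    # expired, that hold no locks, that are not registered as transfer sources, and that are either
    # in AVAILABLE/UNAVAILABLE/BAD state or have been stuck in BEING_DELETED for longer than
    # delay_seconds. Rows are locked with skip_locked so that concurrent reapers do not collide.
    #
    # A hypothetical call from a deletion daemon (the surrounding daemon code is illustrative only):
    #
    #     rows = list_and_mark_unlocked_replicas(limit=100, bytes=10**12, rse_id=rse_id,
    #                                            delay_seconds=600, session=session)
    #
    # The selected replicas are flipped to BEING_DELETED with an epoch tombstone in the chunked
    # UPDATE at the end of this function before being returned.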
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
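    Depending on the requested state, the update has side effects (see the body below): BEING_DELETED
    requires lock_cnt == 0 and that the replica is not used as a transfer source, and sets an OBSOLETE
    tombstone; AVAILABLE and UNAVAILABLE propagate the transfer outcome to the lock/rule machinery;
    TEMPORARY_UNAVAILABLE is only applied to replicas currently AVAILABLE or TEMPORARY_UNAVAILABLE.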
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
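    Both the replica row and the corresponding file DID row are first locked with FOR UPDATE NOWAIT;
    if either row is locked elsewhere, the resulting DatabaseError is swallowed and False is returned,
    while a missing row simply returns True. The tombstone is updated to the new accessed_at timestamp
    unless it is unset or OBSOLETE.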
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
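The sketch below is illustrative only and not part of the Rucio source: it shows one way the {rse_id: bytes} mapping produced by get_RSEcoverage_of_dataset could be post-processed into per-RSE coverage fractions. The helper name coverage_fractions and the assumption that the dataset's total size is already known are hypothetical.

# Illustrative helper (assumed, not part of rucio/core/replica.py): convert the
# {rse_id: bytes_present} mapping returned by get_RSEcoverage_of_dataset into
# per-RSE fractions of a known total dataset size.
def coverage_fractions(coverage_bytes, total_bytes):
    """Return {rse_id: fraction of total_bytes present at that RSE}."""
    if not total_bytes:
        # Avoid division by zero for empty datasets.
        return {rse_id: 0.0 for rse_id in coverage_bytes}
    return {rse_id: nbytes / float(total_bytes)
            for rse_id, nbytes in coverage_bytes.items()}

# Example with made-up values:
#   coverage_fractions({'rse-a': 50, 'rse-b': 100}, total_bytes=100)
#   -> {'rse-a': 0.5, 'rse-b': 1.0}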
get_source_replicas_for_dataset
Get source replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads. :param thread_id: The id of this thread. :param session: The db session in use. :returns: Dictionary mapping each (scope, name) file in the dataset to the list of RSE ids holding an AVAILABLE source replica.
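A minimal, hypothetical usage sketch for the function documented above; the import path rucio.core.replica and the print_source_coverage helper are assumptions for illustration, not part of the source.

# Hypothetical caller sketch: list, per file of a dataset, how many RSEs
# currently hold an AVAILABLE replica that could serve as a transfer source.
from rucio.core.replica import get_source_replicas_for_dataset  # assumed module path
from rucio.db.sqla.session import transactional_session


@transactional_session
def print_source_coverage(scope, name, session=None):
    replicas = get_source_replicas_for_dataset(scope=scope, name=name, session=session)
    for (child_scope, child_name), rse_ids in replicas.items():
        label = '%d source(s)' % len(rse_ids) if rse_ids else 'no source available'
        print('%s:%s -> %s' % (child_scope, child_name, label))

In real code the scope would typically be an InternalScope and the call would run inside an existing transactional session, as the decorated functions in this module do.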
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
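:returns: A list of strings describing the DIDs that could not be declared,
          e.g. 'scope:name Already declared' or 'scope:name Unknown replica'.

Illustrative call (the rse_id, reason and issuer values below are placeholders,
not taken from this module)::

    unknown = add_bad_dids(dids=[{'scope': 'user.jdoe', 'name': 'file_1'}],
                           rse_id=some_rse_id, reason='lost on disk',
                           issuer=issuer_account, state=BadFilesStatus.BAD)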
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
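# A minimal standalone sketch of the same reservoir-sampling idea (illustrative
# only, not used by this module):
#
#     def reservoir_sample(iterable, n):
#         sample = []
#         for i, item in enumerate(iterable):
#             if i < n:
#                 sample.append(item)
#             else:
#                 j = random.randrange(i + 1)  # uniform index in [0, i]
#                 if j < n:
#                     sample[j] = item
#         return sample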
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
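# In 'all' mode the preferred read scheme of both the wan and the lan domain is
# collected below; for a single domain only that domain's preferred scheme is
# added. The resulting rse_schemes list might look like ['root', 'davs']
# (scheme names here are purely illustrative).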
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
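# Append to the per-RSE PFN list if this RSE was already seen for the current
# file; the else branch below creates the list on first sight. The PFNs keep
# the priority order established by the sort above.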
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
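# Non-Oracle backends reach this branch unconditionally: the DID metadata rows
# are always scheduled for deletion here, while on Oracle this is only done for
# server versions >= 12 (see the branch above).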
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) # MASKED: get_source_replicas_for_dataset function (lines 2392-2451) @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. 
:param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, 
sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
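# --- Editorial sketch (hypothetical, not part of the original Rucio module) ---
# get_RSEcoverage_of_dataset() above returns a {rse_id: total_bytes} mapping for
# every RSE holding replicas of the dataset's files (replicas in the
# BEING_DELETED state are excluded, and RSEs with zero bytes are dropped).
# A minimal usage sketch; 'scope' is assumed to be an already-constructed
# InternalScope and the database session is injected by the @read_session
# decorator of the function being called:

def print_dataset_coverage(scope, dataset_name):
    """Print per-RSE byte coverage for a dataset (illustrative helper only)."""
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=dataset_name)
    for rse_id, total_bytes in sorted(coverage.items(), key=lambda kv: kv[1], reverse=True):
        print('%s holds %d bytes of %s' % (rse_id, total_bytes, dataset_name))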
@transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas
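# --- Editorial sketch (hypothetical, not part of the original Rucio module) ---
# The implementation above maps every file in the dataset to the list of RSE ids
# holding an AVAILABLE replica of it; because of the outer join, files with no
# available source are kept and map to an empty list. A minimal usage sketch;
# 'scope' is assumed to be an already-constructed InternalScope and the database
# session is injected by the @transactional_session decorator of the function
# being called:

def find_files_without_source(scope, dataset_name, source_rses=None):
    """Return the (child_scope, child_name) pairs with no available source replica."""
    replicas = get_source_replicas_for_dataset(scope=scope, name=dataset_name, source_rses=source_rses)
    return [did for did, rse_ids in replicas.items() if not rse_ids]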
2392
2451
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
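    :returns: A tuple (exists, scope, name, already_declared, size); scope, name and size are None when no replica is found.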
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
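                        # Fall back to DELETED so the bad-replica entry still reaches a terminal state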
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
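    :returns: The list of replicas that could not be declared, each with the reason ('Already declared' or 'Unknown replica').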
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
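    # The net effect is a (near-)uniform random sample of size nrandom drawn without
    # materialising the whole generator in memory; list_replicas() below relies on this.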
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
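    The cache-site map is fetched from the Virtual Placement endpoint and kept in the module-level dogpile cache region, so repeated calls within the expiration window do not re-query the VP server.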
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
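                            # in 'all' mode, collect the preferred read scheme for both the WAN and the LAN door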
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
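                    # the RSE already has an entry: append the next PFN in priority order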
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
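    :param pfns: If True (the default), resolve and include the physical file names (PFNs) of the replicas.
    :param nrandom: If set, return at most this many entries, selected at random from the full result.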
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
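    :param dataset_meta: Optional metadata of the parent dataset, applied to any newly created file DIDs.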
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
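# A minimal, self-contained sketch of the chunked `or_(*chunk)` pattern used by delete_replicas
# and __cleanup_after_replica_deletion above; `_chunks_sketch` is a local stand-in for the
# chunks() helper this module imports, and the counter stands in for the real
# session.query(...).filter(or_(*chunk)).delete(synchronize_session=False) calls.
def _chunks_sketch(items, size):
    """Yield successive fixed-size slices of a list."""
    for i in range(0, len(items), size):
        yield items[i:i + size]


def _bulk_delete_sketch(conditions, chunk_size=10):
    statements = 0
    for chunk in _chunks_sketch(conditions, chunk_size):
        # one delete statement per chunk keeps the OR-list (and bind parameters) bounded
        statements += 1
    return statements


assert _bulk_delete_sketch([('mock', 'file_%d' % i) for i in range(25)]) == 3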
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
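# Illustrative-only restatement of the selection loop in list_and_mark_unlocked_replicas above:
# candidates are accumulated until either the requested number of files or the needed bytes is
# reached; plain dicts replace the database rows and no locking is performed here.
def _select_for_deletion_sketch(candidates, limit, needed_space=None):
    rows, total_bytes, total_files = [], 0, 0
    for replica in candidates:
        if needed_space is not None and total_bytes > needed_space:
            break
        total_bytes += replica['bytes']
        total_files += 1
        if total_files > limit:
            break
        rows.append(replica)
    return rows


picked = _select_for_deletion_sketch([{'name': 'file_%d' % i, 'bytes': 100} for i in range(10)],
                                     limit=3, needed_space=10**6)
assert len(picked) == 3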
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
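# Sketch of the string-to-enum normalisation done at the top of update_replicas_states; the
# ToyReplicaState enum and its single-letter values are hypothetical stand-ins and are not
# Rucio's actual ReplicaState definition.
from enum import Enum


class ToyReplicaState(Enum):
    AVAILABLE = 'A'
    UNAVAILABLE = 'U'
    BEING_DELETED = 'D'


def _normalise_state_sketch(state):
    # mirrors: if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(...)
    return ToyReplicaState(state) if isinstance(state, str) else state


assert _normalise_state_sketch('A') is ToyReplicaState.AVAILABLE
assert _normalise_state_sketch(ToyReplicaState.UNAVAILABLE) is ToyReplicaState.UNAVAILABLE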
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
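# Standalone sketch of how get_and_lock_file_replicas_for_dataset assembles its return value:
# one entry per child file plus a (scope, name) -> [replica, ...] map that stays empty when the
# outer join yields no replica row; tuples stand in for the SQLAlchemy result rows.
def _collect_dataset_replicas_sketch(rows):
    files, replicas = {}, {}
    for child_scope, child_name, bytes_, replica in rows:
        key = (child_scope, child_name)
        if key not in files:
            files[key] = {'scope': child_scope, 'name': child_name, 'bytes': bytes_}
        replicas.setdefault(key, [])
        if replica is not None:
            replicas[key].append(replica)
    return list(files.values()), replicas


_files, _replicas = _collect_dataset_replicas_sketch([
    ('mock', 'file_a', 10, 'replica_on_rse_1'),
    ('mock', 'file_a', 10, 'replica_on_rse_2'),
    ('mock', 'file_b', 20, None),   # file currently without any replica
])
assert len(_files) == 2 and _replicas[('mock', 'file_b')] == []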
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
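# Minimal sketch of the touch_collection_replicas loop above: every entry gets either its own
# accessed_at or a shared "now" timestamp and the first database failure aborts with False;
# the _update callable stands in for the session.query(...).update(...) call.
from datetime import datetime


def _touch_collection_replicas_sketch(collection_replicas, _update):
    now = datetime.utcnow()
    for collection_replica in collection_replicas:
        try:
            _update(collection_replica, collection_replica.get('accessed_at') or now)
        except RuntimeError:   # stands in for sqlalchemy's DatabaseError
            return False
    return True


_touched = []
assert _touch_collection_replicas_sketch([{'scope': 'mock', 'name': 'dataset_a'}],
                                         lambda replica, ts: _touched.append((replica['name'], ts)))
assert _touched[0][0] == 'dataset_a'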
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
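# Sketch of the final classification step in the deep branch of list_dataset_replicas above: the
# dataset-wide length and bytes are attached to every per-RSE row, and the replica counts as
# AVAILABLE only when its available_length equals the dataset length; strings stand in for the
# ReplicaState members.
def _classify_dataset_replica_sketch(row, dataset_length, dataset_bytes):
    replica = dict(row)
    replica['length'], replica['bytes'] = dataset_length, dataset_bytes
    replica['state'] = 'AVAILABLE' if row['available_length'] == dataset_length else 'UNAVAILABLE'
    return replica


assert _classify_dataset_replica_sketch({'available_length': 3}, 3, 300)['state'] == 'AVAILABLE'
assert _classify_dataset_replica_sketch({'available_length': 2}, 3, 300)['state'] == 'UNAVAILABLE'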
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
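# Sketch of how the per-scope conditions in list_dataset_replicas_bulk above are prepared: the
# caller passes a {scope: [names]} mapping and each scope becomes a single IN(...) clause (here
# just a (scope, names) tuple); chunking then happens over scopes, not over individual names.
def _bulk_conditions_sketch(names_by_intscope):
    condition = []
    for scope in names_by_intscope:
        # real code: and_(CollectionReplica.scope == scope, CollectionReplica.name.in_(names))
        condition.append((scope, tuple(names_by_intscope[scope])))
    return condition


assert len(_bulk_conditions_sketch({'mock': ['data_a', 'data_b'], 'user.jdoe': ['data_c']})) == 2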
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
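# Pure-Python restatement of the dialect-independent de-duplication branch in
# get_cleaned_updated_collection_replicas above: keep the first request per (scope, name, rse_id)
# key and collect the ids of every later duplicate so they can be deleted in chunks.
def _find_duplicate_request_ids_sketch(update_requests):
    seen, duplicate_ids = set(), []
    for request in update_requests:
        key = (request['scope'], request['name'], request.get('rse_id'))
        if key in seen:
            duplicate_ids.append(request['id'])
        else:
            seen.add(key)
    return duplicate_ids


_requests = [{'id': 1, 'scope': 'mock', 'name': 'dataset_a', 'rse_id': 'rse_1'},
             {'id': 2, 'scope': 'mock', 'name': 'dataset_a', 'rse_id': 'rse_1'},
             {'id': 3, 'scope': 'mock', 'name': 'dataset_a', 'rse_id': None}]
assert _find_duplicate_request_ids_sketch(_requests) == [2]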
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
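# Sketch of the "update the existing TEMPORARY_UNAVAILABLE row, otherwise insert a new one"
# logic in bulk_add_bad_replicas above, using an in-memory dict in place of the BadReplicas table.
def _upsert_temporary_unavailable_sketch(existing, replicas, reason, expires_at):
    inserted = 0
    for replica in replicas:
        key = (replica['scope'], replica['name'], replica['rse_id'])
        if key in existing:
            existing[key].update({'reason': reason, 'expires_at': expires_at})
        else:
            existing[key] = {'state': 'TEMPORARY_UNAVAILABLE', 'reason': reason, 'expires_at': expires_at}
            inserted += 1
    return inserted


_table = {}
assert _upsert_temporary_unavailable_sketch(_table, [{'scope': 'mock', 'name': 'file_a', 'rse_id': 'rse_1'}], 'site downtime', None) == 1
assert _upsert_temporary_unavailable_sketch(_table, [{'scope': 'mock', 'name': 'file_a', 'rse_id': 'rse_1'}], 'still down', None) == 0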
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
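# --- Editorial example (added; not part of the original module) ---
# A hedged sketch of how a caller might combine the two helpers above: report
# which RSEs hold data of a dataset via get_RSEcoverage_of_dataset(), then mark
# one file replica on one of those RSEs as deletable via set_tombstone(). The
# scope/name values are placeholders, and the helper relies on names
# (InternalScope, get_rse_name, OBSOLETE) already imported by this module.
def _example_report_coverage_and_tombstone(session=None):
    scope = InternalScope('mock')  # hypothetical scope on the default VO
    coverage = get_RSEcoverage_of_dataset(scope=scope, name='dataset_1', session=session)
    for rse_id, total_bytes in coverage.items():
        print('%s holds %d bytes of %s:dataset_1' % (get_rse_name(rse_id=rse_id, session=session), total_bytes, scope))
    if coverage:
        # Default tombstone is OBSOLETE; raises ReplicaIsLocked or ReplicaNotFound
        # on the error paths documented above.
        set_tombstone(rse_id=next(iter(coverage)), scope=scope, name='file_1', session=session)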
list_dataset_replicas_bulk
:param names_by_intscope: The dictionary mapping each internal scope to its list of dataset names. :param session: The database session in use. :returns: A list of dictionaries containing the dataset replicas with their associated metrics and timestamps.
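# --- Editorial sketch (added): one possible shape of list_dataset_replicas_bulk ---
# The docstring above documents only the signature and the return value, so the
# body below is an assumption, not the upstream implementation: it reads the
# CollectionReplica rows for every (scope, name) pair and returns the metric and
# timestamp columns seen elsewhere in this module. The created_at/updated_at
# attributes are assumed to come from the common model base.
from sqlalchemy import and_, or_

from rucio.core.rse import get_rse_name
from rucio.db.sqla import models
from rucio.db.sqla.session import read_session


@read_session
def list_dataset_replicas_bulk_sketch(names_by_intscope, session=None):
    did_clause = []
    for scope, names in names_by_intscope.items():
        did_clause.append(and_(models.CollectionReplica.scope == scope,
                               models.CollectionReplica.name.in_(names)))
    if not did_clause:
        return []
    result = []
    query = session.query(models.CollectionReplica).filter(or_(*did_clause))
    for replica in query:
        result.append({'scope': replica.scope, 'name': replica.name,
                       'rse_id': replica.rse_id,
                       'rse': get_rse_name(rse_id=replica.rse_id, session=session),
                       'bytes': replica.bytes, 'length': replica.length,
                       'available_bytes': replica.available_bytes,
                       'available_length': replica.available_replicas_cnt,
                       'state': replica.state,
                       'created_at': replica.created_at,   # assumed model-base timestamp
                       'updated_at': replica.updated_at})  # assumed model-base timestamp
    return result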
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
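    # (Added editorial note) Concrete example of the reservoir idea with nrandom=2:
    # the first two elements seed `selected`; the third element is swapped into a
    # random slot with probability 2/3, the fourth with probability 2/4, and so on,
    # so every element seen ends up in the final pair with (roughly, given the
    # inclusive randint below) the same probability, while only `nrandom` items are
    # ever held in memory and the generator is consumed in a single pass.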
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
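# also schedule removal of the DID metadata (DidMeta) entry; on Oracle this is only done for versions >= 12 (see the branch above)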
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
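    # Deletion candidates are replicas whose tombstone has expired and which have no locks,
    # are not registered as transfer sources, and are either in AVAILABLE/UNAVAILABLE/BAD state
    # or have been stuck in BEING_DELETED for longer than `delay_seconds`. Matching rows are
    # locked with FOR UPDATE SKIP LOCKED and processed oldest-tombstone first.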
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
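    Example (illustrative; values are placeholders, 'accessed_at' is optional):
        touch_replica({'scope': scope, 'name': name, 'rse_id': rse_id, 'accessed_at': datetime.utcnow()})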
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica # MASKED: list_dataset_replicas_bulk function (lines 2643-2680) @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. 
Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. :returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. 
""" # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
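As a brief illustration (not part of the stored source), the sketch below shows how the per-RSE coverage map returned by get_RSEcoverage_of_dataset above might be consumed; the scope and dataset name are invented, and the database session is assumed to be supplied by the @read_session decorator.

# Hypothetical consumer of get_RSEcoverage_of_dataset (example values only).
from rucio.common.types import InternalScope

scope = InternalScope('mc16_13TeV', vo='def')                           # invented scope
coverage = get_RSEcoverage_of_dataset(scope=scope, name='dataset_A')    # invented dataset name
for rse_id, total_bytes in coverage.items():
    # get_rse_name is imported from rucio.core.rse in the file body below
    print(get_rse_name(rse_id=rse_id), total_bytes)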
@stream_session
def list_dataset_replicas_bulk(names_by_intscope, session=None):
    """
    :param names_by_intscope: The dictionary of internal scopes pointing at the list of names.
    :param session: Database session to use.

    :returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps
    """

    condition = []
    for scope in names_by_intscope:
        condition.append(and_(models.CollectionReplica.scope == scope,
                              models.CollectionReplica.name.in_(names_by_intscope[scope])))

    try:
        # chunk size refers to the number of different scopes, see above
        for chunk in chunks(condition, 10):
            query = session.query(models.CollectionReplica.scope,
                                  models.CollectionReplica.name,
                                  models.RSE.rse,
                                  models.CollectionReplica.rse_id,
                                  models.CollectionReplica.bytes,
                                  models.CollectionReplica.length,
                                  models.CollectionReplica.available_bytes,
                                  models.CollectionReplica.available_replicas_cnt.label("available_length"),
                                  models.CollectionReplica.state,
                                  models.CollectionReplica.created_at,
                                  models.CollectionReplica.updated_at,
                                  models.CollectionReplica.accessed_at) \
                .filter(models.CollectionReplica.did_type == DIDType.DATASET) \
                .filter(models.CollectionReplica.rse_id == models.RSE.id) \
                .filter(or_(*chunk)) \
                .filter(models.RSE.deleted == false())
            for row in query:
                yield row._asdict()
    except NoResultFound:
        raise exception.DataIdentifierNotFound('No Data Identifiers found')
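As a usage illustration (again not part of the stored row), a caller might assemble the names_by_intscope mapping and iterate the generator as sketched below; the scope and dataset names are invented, and it is assumed that the @stream_session decorator opens the database session when none is passed.

# Hypothetical caller of list_dataset_replicas_bulk (example values only).
from rucio.common.types import InternalScope

names_by_intscope = {
    InternalScope('mc16_13TeV', vo='def'): ['dataset_A', 'dataset_B'],   # invented DIDs
    InternalScope('data18_13TeV', vo='def'): ['dataset_C'],
}

# Each yielded row is the dict produced by row._asdict() above:
# scope, name, rse, rse_id, byte/length counters, state and timestamps.
for replica in list_dataset_replicas_bulk(names_by_intscope):
    print(replica['scope'], replica['name'], replica['rse'], replica['available_length'])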
2643
2680
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
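
    The filename is hashed (first 8 bytes of its sha256 digest, mapped into [0, 1))
    and matched against the per-site server ranges fetched from the VP endpoint;
    the first range whose upper bound exceeds the hash value selects the server.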
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
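                            # 'all' resolves a preferred read scheme for both the WAN and the
                            # LAN protocols of this RSE; otherwise only the protocols of the
                            # selected domain are consulted.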
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
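    :param pfns: If set to True, compute and include the PFNs in the result.
    :param nrandom: If set, return at most this many randomly selected results.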
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
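                        # Non-Oracle backends always schedule DidMeta cleanup; on Oracle this is
                        # gated on version >= 12 above (presumably due to JSON column support).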
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
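    # The query below selects deletion candidates on this RSE: replicas whose
    # tombstone has expired, which hold no locks, are not registered as a
    # transfer source, and are either in a deletable state or have been stuck
    # in BEING_DELETED for more than delay_seconds. Matching rows are locked
    # with SKIP LOCKED so concurrent workers do not pick the same candidates.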
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
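
    Both the replica row and the corresponding DID row are locked with a
    non-blocking SELECT ... FOR UPDATE; if either lock cannot be acquired,
    the update is skipped and False is returned.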
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
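        # On Oracle, duplicate update requests are removed with a raw ROWID comparison: within each group of
        # rows sharing scope, name, did_type and rse_id (two NULL rse_ids count as equal), every row whose
        # ROWID is greater than another row's in the group is deleted, leaving one request per collection replica.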
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
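The helpers above cover the replica bookkeeping cycle, from access-time updates to tombstoning and per-RSE dataset coverage. The following is a minimal usage sketch, not part of the original module: it assumes this file is importable as rucio.core.replica, that the session-decorated functions open their own database session when none is passed, and that the scope, dataset name, file name and some_rse_id values are purely illustrative placeholders.

import logging

from rucio.common.types import InternalScope
from rucio.core.replica import get_RSEcoverage_of_dataset, set_tombstone  # assumed module path

scope = InternalScope('user.jdoe', vo='def')        # hypothetical scope
dataset_name = 'user.jdoe.test.dataset'             # hypothetical dataset name
some_rse_id = 'deadbeefdeadbeefdeadbeefdeadbeef'    # hypothetical RSE id

# Bytes of the dataset present per RSE (replicas not in BEING_DELETED), e.g. to pick a cleanup candidate.
coverage = get_RSEcoverage_of_dataset(scope=scope, name=dataset_name)
for rse_id, nbytes in coverage.items():
    logging.info('%s holds %d bytes of %s', rse_id, nbytes, dataset_name)

# Mark one file replica as eligible for deletion by setting the default OBSOLETE tombstone; the call
# raises ReplicaIsLocked if a lock exists and ReplicaNotFound if the replica is not registered on that RSE.
set_tombstone(rse_id=some_rse_id, scope=scope, name='user.jdoe.test.file')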
list_dataset_replicas_vp
List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites
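A brief consumption sketch for the function named above: list_dataset_replicas_vp is a generator that only yields site suggestions when a Virtual Placement endpoint is configured and the dataset has at least one non-TAPE replica readable over the WAN via the root protocol. The import path, scope and dataset name below are illustrative assumptions, not values taken from the source.

import logging

from rucio.common.types import InternalScope
from rucio.core.replica import list_dataset_replicas_vp  # assumed module path

scope = InternalScope('data17_13TeV', vo='def')   # hypothetical scope
name = 'data17_13TeV.periodK.DAOD_EXAMPLE'        # hypothetical dataset name

# Yields dictionaries of the form {'vp': True, 'site': <site>}; nothing is yielded when no VP
# endpoint is configured or no WAN-readable, non-TAPE 'root' replica exists for the dataset.
for reply in list_dataset_replicas_vp(scope=scope, name=name, deep=False, logger=logging.log):
    print(reply['vp'], reply['site'])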
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
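    # Concretely: after the first nrandom elements are copied, the i-th element consumed overall
    # replaces one of the previously kept elements with probability nrandom / (i + 1), so at most
    # nrandom elements are held in memory regardless of how many items the generator yields.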
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
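    # Added descriptive commentary on the selection query below:
    #   - only replicas on the requested rse_id whose tombstone is set and already
    #     in the past, and which carry no locks (lock_cnt == 0), are considered;
    #   - eligible states are AVAILABLE, UNAVAILABLE and BAD, plus BEING_DELETED
    #     replicas whose last update is older than delay_seconds (stuck deletions);
    #   - replicas still listed in the sources table are skipped, presumably to
    #     avoid removing files that are in use as transfer sources;
    #   - rows are locked with SKIP LOCKED and ordered by tombstone, so concurrent
    #     workers draining the same RSE do not block each other.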
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') # MASKED: list_dataset_replicas_vp function (lines 2683-2734) @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. :returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. 
""" # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
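

# Illustrative usage sketch, added as commentary and not part of the original
# module: a minimal example of how the two helpers above might be combined.
# The scope, dataset and file names are hypothetical placeholders, and the
# decorated functions open their own database session when none is passed in.
def _example_coverage_and_tombstone():
    from rucio.common.types import InternalScope

    scope = InternalScope('user.jdoe')      # hypothetical scope
    dataset = 'user.jdoe.my.dataset'        # hypothetical dataset name

    # Bytes of the dataset available per RSE (replicas being deleted are excluded)
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=dataset)
    for rse_id, nbytes in coverage.items():
        print('{0} hosts {1} bytes of {2}'.format(rse_id, nbytes, dataset))

    # Mark one (unlocked) file replica as deletable by setting the OBSOLETE
    # tombstone; ReplicaIsLocked / ReplicaNotFound are raised otherwise.
    if coverage:
        rse_id = next(iter(coverage))
        set_tombstone(rse_id=rse_id, scope=scope, name='user.jdoe.my.file')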
@stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply}
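

# Illustrative usage sketch, added as commentary and not part of the original
# module: one way a caller might consume the generator above, falling back to
# the regular replica listing when no Virtual Placement endpoint is configured
# or no suitable replica exists. Scope and dataset name are hypothetical.
def _example_virtual_placement_lookup():
    from rucio.common.types import InternalScope

    scope = InternalScope('user.jdoe')      # hypothetical scope
    dataset = 'user.jdoe.my.dataset'        # hypothetical dataset name

    vp_sites = [reply['site'] for reply in list_dataset_replicas_vp(scope=scope, name=dataset)]
    if vp_sites:
        print('Virtual Placement suggests sites: {0}'.format(vp_sites))
    else:
        # Fall back to the plain per-RSE dataset replica listing
        for replica in list_dataset_replicas(scope=scope, name=dataset, deep=False):
            print('{0} on {1}: {2}'.format(dataset, replica['rse'], replica['state']))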
2683
2734
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
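:returns: A tuple (exists, scope, name, already_declared, size): `exists` is True if a replica is registered at the RSE, `scope`, `name` and `size` describe that replica (None otherwise), and `already_declared` is True if it is already declared BAD.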
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
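# Any other availability value falls through to DELETED so that the bad replica entry still gets closed out.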
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
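:returns: The list of replicas that could not be declared, each entry formatted as 'scope:name reason'.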
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
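# Example: with nrandom = 3 the first three replicas seed the reservoir; the element consumed at position i afterwards overwrites a random slot with probability nrandom / (i + 1), so at most nrandom results are ever held in memory.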
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
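    # Candidate selection performed by the query below:
    #   - only replicas whose tombstone is already in the past and which have lock_cnt == 0;
    #   - the case() comparison restricts matches to the requested rse_id, phrased that way
    #     (presumably) so that Oracle keeps using the REPLICAS_TOMBSTONE_IDX index named in the hint;
    #   - AVAILABLE, UNAVAILABLE and BAD replicas are eligible immediately, while replicas already
    #     in BEING_DELETED are only re-selected once they are older than `delay_seconds`;
    #   - replicas still referenced in the sources table are excluded;
    #   - rows are locked with FOR UPDATE SKIP LOCKED and returned oldest tombstone first.
    # The rows retained by the loop further down are finally switched to BEING_DELETED with an
    # epoch (1970-01-01) tombstone.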
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
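    Behaviour per target state, as implemented below: BEING_DELETED is only applied to unlocked
    replicas that are not in use as a transfer source and also sets an OBSOLETE tombstone;
    AVAILABLE and UNAVAILABLE additionally update the corresponding locks through rucio.core.lock;
    TEMPORARY_UNAVAILABLE is only applied on top of AVAILABLE or TEMPORARY_UNAVAILABLE replicas.

    Illustrative call (a sketch only; `rse_id`, `scope` and the file name are placeholders):

        update_replicas_states([{'rse_id': rse_id, 'scope': scope, 'name': 'file_1',
                                 'state': ReplicaState.AVAILABLE}])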
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
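    Illustrative call (a sketch only; `rse_id` and `scope` are placeholders; when 'accessed_at'
    is omitted the current UTC time is used):

        touch_replica({'rse_id': rse_id, 'scope': scope, 'name': 'file_1',
                       'accessed_at': datetime.utcnow()})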
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
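        # Dialect-specific de-duplication of the update requests handled below: the raw Oracle
        # statement keeps a single row per (scope, name, did_type, rse_id) group -- NULL rse_ids
        # being treated as equal -- by deleting every row whose ROWID is larger than that of
        # another row in the same group; the MySQL branch keeps the row with the highest id per
        # (scope, name, rse_id) via a subquery; every other dialect falls back to collecting
        # duplicate ids in Python and deleting them in chunks.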
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
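    Two cases are handled below: if the request carries an rse_id, only the collection replica
    on that RSE is recomputed (and deleted once its available replica count drops from a
    non-zero value to zero); if rse_id is None, the length, bytes and per-RSE availability of
    every collection replica of the DID are recomputed. The processed update request row is
    deleted at the end.

    Illustrative call (a sketch only; the id, scope and names are placeholders):

        update_collection_replica({'id': request_id, 'scope': scope,
                                   'name': 'dataset_1', 'rse_id': rse_id})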
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
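    Illustrative call (a sketch only; `pfn_1` and `pfn_2` stand for PFN strings previously
    declared through add_bad_pfns):

        bulk_delete_bad_pfns(pfns=[pfn_1, pfn_2])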
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
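A minimal usage sketch for get_RSEcoverage_of_dataset above. It assumes a configured Rucio database and that this module's get_RSEcoverage_of_dataset and get_rse_name are importable; the scope/name values and the helper name print_dataset_coverage are illustrative and not part of the source.

from rucio.common.types import InternalScope

def print_dataset_coverage(scope_name, dataset_name, vo='def'):
    # Sketch only: get_RSEcoverage_of_dataset is decorated with @read_session,
    # so it opens its own database session when none is passed in.
    scope = InternalScope(scope_name, vo=vo)
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=dataset_name)
    replicated_total = sum(coverage.values()) or 1
    for rse_id, nbytes in sorted(coverage.items(), key=lambda kv: kv[1], reverse=True):
        # get_rse_name resolves the RSE id to a human-readable name.
        print('%s holds %d bytes (%.1f%% of all replicated bytes)'
              % (get_rse_name(rse_id=rse_id), nbytes, 100.0 * nbytes / replicated_total))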
list_datasets_per_rse
List datasets at an RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: the maximum number of dataset replicas returned. :param session: Database session to use. :returns: A list of dataset replica dictionaries
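A hedged calling sketch for the list_datasets_per_rse API documented above. Only the docstring is used as ground truth, so the keyword usage of filters/limit and the 'scope'/'name' keys read from each result are assumptions, not verified against the implementation.

def summarize_datasets_on_rse(rse_id, limit=50):
    # Sketch only: list_datasets_per_rse is expected to return dataset replica
    # dictionaries; the 'scope' and 'name' keys are assumed, hence the defensive .get().
    for dataset_replica in list_datasets_per_rse(rse_id, filters=None, limit=limit):
        scope = dataset_replica.get('scope', '?')
        name = dataset_replica.get('name', '?')
        print('%s:%s -> %s' % (scope, name, dataset_replica))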
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
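    # Descriptive note on the selection below (comments only, behaviour unchanged):
    # the query picks replicas on the given RSE whose tombstone has expired and which
    # carry no locks (lock_cnt == 0), keeping states AVAILABLE/UNAVAILABLE/BAD as well
    # as BEING_DELETED rows older than `delay_seconds`; replicas still referenced as
    # transfer sources are excluded, rows are locked with FOR UPDATE SKIP LOCKED and
    # returned in tombstone order, so the oldest candidates are marked for deletion first.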
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
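
    A minimal usage sketch (illustrative; it mirrors how update_replica_state() in this
    module wraps the call for a single replica):

        update_replicas_states(replicas=[{'scope': scope, 'name': name,
                                          'state': ReplicaState.AVAILABLE,
                                          'rse_id': rse_id}], session=session)
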
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
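
    A minimal usage sketch (illustrative values only; the 'accessed_at' key is optional
    and defaults to the current UTC time):

        touch_replica({'scope': scope, 'name': name, 'rse_id': rse_id,
                       'accessed_at': datetime.utcnow()}, session=session)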
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} # MASKED: list_datasets_per_rse function (lines 2737-2790) @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. 
""" # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
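For orientation, get_RSEcoverage_of_dataset above returns a plain {rse_id: total_bytes} mapping, so callers can iterate it directly. A minimal usage sketch follows; it assumes this module is importable as rucio.core.replica and that a Rucio database is configured (the @read_session decorator supplies the session argument), and the scope and dataset name are hypothetical.

# Hypothetical illustration, not part of the source: the scope and dataset name
# are invented, and rucio.core.replica plus a configured database are assumed.
from rucio.common.types import InternalScope
from rucio.core.replica import get_RSEcoverage_of_dataset

coverage = get_RSEcoverage_of_dataset(scope=InternalScope('mock'), name='dataset_1')
for rse_id, total_bytes in coverage.items():
    # Returned mapping: {rse_id: <total bytes of the dataset present at that RSE>}
    print(rse_id, total_bytes)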
@stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. :returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict()
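The implementation above streams one dictionary per dataset replica at the given RSE and translates '*' wildcards in the name/scope filters to SQL '%'. A hedged usage sketch, with an invented RSE id and filter value, assuming the same rucio.core.replica module path and a configured database:

# Hypothetical illustration, not part of the source: the rse_id value and the
# 'name' filter pattern are invented; a configured Rucio database is assumed
# (the @stream_session decorator supplies the session argument).
from rucio.core.replica import list_datasets_per_rse

for replica in list_datasets_per_rse(rse_id='0123456789abcdef0123456789abcdef',
                                     filters={'name': 'data16*'},  # '*' becomes SQL '%'
                                     limit=10):
    # Each yielded dict carries scope, name, rse, bytes/length counters,
    # state and created/updated/accessed timestamps.
    print(replica['rse'], replica['name'], replica['state'])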
2737
2790
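The two numbers above (2737, 2790) are the 1-indexed start and end lines of the masked list_datasets_per_rse span inside the full source file reproduced next, matching the "# MASKED: ... (lines 2737-2790)" placeholder. A small sketch of how those offsets locate the span; the variable names here are assumptions about this record's fields, not part of the source:

# Illustrative only: 'file_content' stands for the complete source file shown
# below, and the default line numbers come from this record; this is not Rucio code.
def masked_span(file_content: str, start_line: int = 2737, end_line: int = 2790) -> str:
    lines = file_content.splitlines()
    # The offsets are 1-indexed and inclusive, mirroring the start/end line fields.
    return '\n'.join(lines[start_line - 1:end_line])

# The returned text should match the list_datasets_per_rse implementation that
# the '# MASKED:' comment replaces in the masked version of the file.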
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
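
    The cache node is selected deterministically: the filename is hashed into the unit
    interval and matched against the per-site server ranges advertised by the VP server,
    roughly (illustrative, mirroring the implementation below):

        h = unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0] / 2**64

    The first server whose range upper bound exceeds h is returned.
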
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
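                            # domain 'all': collect the RSE's preferred read scheme for both wan and lan;
                            # the else-branch below handles a single selected domain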
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
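                    # rse_pfns was sorted by (rse_id, priority), so each RSE's PFN list is built in priority order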
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
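                # schedule deletion of any extra metadata stored for this did in the DID_META table
                # (on Oracle this table is only usable from version 12 onwards, hence the check above)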
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
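    # The query below selects deletion candidates: replicas whose tombstone lies in the past,
    # with no locks, on the requested RSE, in AVAILABLE/UNAVAILABLE/BAD state (or stuck in
    # BEING_DELETED for longer than `delay_seconds`), and not currently referenced as a transfer
    # source; matching rows are locked with FOR UPDATE SKIP LOCKED and ordered by tombstone.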
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
    :param replicas: The list of replicas.
    :param nowait: Nowait parameter for the for_update queries.
    :param session: The database session in use.
    """

    for replica in replicas:
        query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
        try:
            if nowait:
                query.with_for_update(nowait=True).one()
        except NoResultFound:
            # remember scope, name and rse
            raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session)))

        if isinstance(replica['state'], string_types):
            replica['state'] = ReplicaState(replica['state'])

        values = {'state': replica['state']}
        if replica['state'] == ReplicaState.BEING_DELETED:
            query = query.filter_by(lock_cnt=0)
            # Exclude replicas used as sources
            stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope,
                                          models.RSEFileAssociation.name == models.Source.name,
                                          models.RSEFileAssociation.rse_id == models.Source.rse_id))
            query = query.filter(not_(stmt))
            values['tombstone'] = OBSOLETE
        elif replica['state'] == ReplicaState.AVAILABLE:
            rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.UNAVAILABLE:
            rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'],
                                            error_message=replica.get('error_message', None),
                                            broken_rule_id=replica.get('broken_rule_id', None),
                                            broken_message=replica.get('broken_message', None),
                                            nowait=nowait, session=session)
        elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE:
            query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE))

        if 'path' in replica and replica['path']:
            values['path'] = replica['path']

        if not query.update(values, synchronize_session=False):
            if 'rse' not in replica:
                replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
            raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica)
    return True


@transactional_session
def touch_replica(replica, session=None):
    """
    Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.

    :param replica: a dictionary with the information of the affected replica.
    :param session: The database session in use.
    :returns: True, if successful, False otherwise.
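
    Example (illustrative only; assumes an existing replica and an open database session):

        touch_replica({'rse_id': rse_id, 'scope': scope, 'name': name,
                       'accessed_at': datetime.utcnow()}, session=session)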
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
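For orientation, a minimal usage sketch of the two helpers defined just above (set_tombstone and get_RSEcoverage_of_dataset). The wrapper names, the import path and the placeholder arguments are assumptions added for illustration, not part of the source; the database session is created by the @transactional_session / @read_session decorators when none is passed.

from rucio.common.exception import ReplicaIsLocked, ReplicaNotFound
from rucio.core.replica import get_RSEcoverage_of_dataset, set_tombstone


def mark_files_obsolete(rse_id, scope, file_names):
    # Hypothetical helper: tombstone a list of files on one RSE, skipping
    # replicas that are locked or no longer registered. `scope` is expected
    # to be an InternalScope, `file_names` a list of LFN strings.
    skipped = []
    for name in file_names:
        try:
            set_tombstone(rse_id=rse_id, scope=scope, name=name)  # default tombstone is OBSOLETE
        except (ReplicaIsLocked, ReplicaNotFound):
            skipped.append(name)
    return skipped


def report_dataset_footprint(scope, name):
    # Hypothetical helper: print the per-RSE byte totals returned by
    # get_RSEcoverage_of_dataset, i.e. {rse_id: <bytes available at rse_id>}.
    for rse_id, nbytes in get_RSEcoverage_of_dataset(scope=scope, name=name).items():
        print('%s: %d bytes' % (rse_id, nbytes))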
get_bad_pfns
Returns a list of bad PFNs

:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this minos instance.
:param total_threads: The total number of minos threads.
:param session: The database session in use.

returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}
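A brief, hypothetical usage sketch for get_bad_pfns follows; the worker function and its thread arguments only illustrate how a minos-style daemon might partition the table, and the import path is assumed to be rucio.core.replica.

from rucio.core.replica import get_bad_pfns


def collect_bad_pfns(thread_id, total_threads, batch_size=1000):
    # Hypothetical worker step: fetch one batch of bad PFNs for this thread
    # and group the returned entries ({'pfn', 'state', 'reason', 'account',
    # 'expires_at'}) by state. The database session is provided by the
    # @read_session decorator on get_bad_pfns itself.
    by_state = {}
    for entry in get_bad_pfns(limit=batch_size, thread=thread_id, total_threads=total_threads):
        by_state.setdefault(entry['state'], []).append(entry['pfn'])
    return by_state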
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
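# (Standard reservoir-sampling property, noted here for clarity: once the whole stream has been consumed, each of the N generated elements should end up in the reservoir with roughly equal probability nrandom / N.)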
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
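# In 'all' mode, pick the preferred read scheme for both the wan and the lan domain of this RSE.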
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True if successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None  # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True if successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
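# Non-Oracle backends: schedule removal of the DID metadata entry unconditionally.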
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() # MASKED: get_bad_pfns function (lines 2970-2989) @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. """ pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. 
:param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. :returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. 
:param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] """ younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. 
other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
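# --- Illustrative usage sketch (editor's addition, not part of the Rucio source) ---
# The docstrings above describe get_RSEcoverage_of_dataset() and get_rse_name()
# (the latter is imported at the top of this module); this minimal, assumed example
# shows how they could be combined to report per-RSE byte coverage for a dataset.
# The function name and the print-based reporting are hypothetical placeholders.
def example_report_dataset_coverage(scope, name, session=None):
    """Print, per RSE, how many bytes of the dataset scope:name are available."""
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name, session=session)
    # Sort RSEs by descending byte count so the best-covered site comes first.
    for rse_id, total_bytes in sorted(coverage.items(), key=lambda kv: kv[1], reverse=True):
        print('%s: %d bytes' % (get_rse_name(rse_id=rse_id, session=session), total_bytes))
    return coverage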
@read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result
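# --- Illustrative usage sketch (editor's addition, not part of the Rucio source) ---
# A minimal, assumed example of how a Minos-style worker could drive the
# get_bad_pfns() helper above. The worker numbering, batch size and the
# handle() callback are placeholders, not actual daemon code.
def example_poll_bad_pfns(handle, thread_id=0, total_threads=1, batch_size=1000, session=None):
    """Fetch one batch of bad PFNs for this worker and pass each entry to handle()."""
    # get_bad_pfns() hash-partitions the BAD_PFNS table on 'path', so each
    # (thread_id, total_threads) pair sees a disjoint slice of the table.
    entries = get_bad_pfns(limit=batch_size, thread=thread_id,
                           total_threads=total_threads, session=session)
    for entry in entries:
        # entry keys: 'pfn', 'state', 'reason', 'account', 'expires_at'
        handle(entry)
    return len(entries)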
2970
2989
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
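# --- Illustrative aside (not part of the original module) --------------------
# get_bad_replicas_summary() above selects a dialect-specific "truncate the
# timestamp to a day" expression before grouping. A minimal stand-alone sketch
# of that selection; pick_day_bucket/created_at are hypothetical names, only
# the SQLAlchemy func.* calls are taken from the code above. Note that SQLite's
# STRFTIME expects the format string as its first argument.
from sqlalchemy import func

def pick_day_bucket(dialect_name, created_at):
    """Return a SQLAlchemy expression bucketing a timestamp column by day."""
    if dialect_name == 'oracle':
        return func.trunc(created_at, 'DD')          # TRUNC(created_at, 'DD')
    if dialect_name == 'mysql':
        return func.date(created_at)                 # DATE(created_at)
    if dialect_name == 'postgresql':
        return func.date_trunc('day', created_at)    # DATE_TRUNC('day', created_at)
    return func.strftime('%Y-%m-%d', created_at)     # SQLite fallback
# -----------------------------------------------------------------------------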
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
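# --- Illustrative aside (not part of the original module) --------------------
# The except-branch above maps the availability of a DID whose replica is no
# longer registered onto the final BadFilesStatus. The same mapping as a small
# pure helper (hypothetical name; the enums are the real rucio constants):
from rucio.db.sqla.constants import BadFilesStatus, DIDAvailability

def final_bad_state(availability):
    """LOST DIDs stay LOST; every other availability collapses to DELETED."""
    if availability == DIDAvailability.LOST:
        return BadFilesStatus.LOST
    return BadFilesStatus.DELETED
# -----------------------------------------------------------------------------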
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
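# --- Illustrative aside (not part of the original module) --------------------
# Both __exists_replicas() and __declare_bad_file_replicas() above match a
# replica path against two spellings, with and without the leading '/'. That
# normalisation as a tiny helper (hypothetical name):
def path_variants(path):
    """Return the path spellings OR-ed together in the path clauses above."""
    if path.startswith('/'):
        return [path, path[1:]]      # '/a/b/f' may be stored as 'a/b/f'
    return [path, '/%s' % path]      # 'a/b/f' may be stored as '/a/b/f'

# e.g.: or_(*[models.RSEFileAssociation.path == p for p in path_variants(path)])
# -----------------------------------------------------------------------------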
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
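# Illustrative aside (not from the original code): thanks to this reservoir
# sampling, list_replicas(..., nrandom=3) can hand back three randomly chosen
# replicas in a single pass over the merged sources and with O(nrandom) memory,
# without materialising the full result set, e.g.
#     sample = list(_pick_n_random(3, (make_row(i) for i in big_query())))
# where make_row/big_query are hypothetical stand-ins for the replica sources
# merged further below.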
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
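# --- Illustrative aside (not part of the original module) --------------------
# _list_replicas_for_files() above batches its OR-ed (scope, name) conditions
# with chunks(file_clause, 50), so no single statement carries an arbitrarily
# long OR list. A minimal stand-alone equivalent of that batching helper (named
# chunked here to avoid shadowing the chunks imported from rucio.common.utils
# at the top of this module):
def chunked(items, n):
    """Yield successive n-sized slices of a list."""
    for i in range(0, len(items), n):
        yield items[i:i + n]

# e.g. three statements instead of one 150-term OR:
#     for batch in chunked(replica_conditions, 50): query.filter(or_(*batch))
# -----------------------------------------------------------------------------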
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
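# --- Illustrative aside (not part of the original module) --------------------
# The re-prioritisation above relies on the domain labels sorting as
# 'lan' < 'wan' < 'zip', so a plain lexicographic sort on (domain, priority)
# puts LAN doors first, then WAN, then archive ('zip') access, and the final
# priorities are rewritten 1..N for metalink compatibility. Stand-alone sketch
# with hypothetical PFNs:
example_pfns = {
    'root://lan.example.ch//f': {'domain': 'lan', 'priority': 2},
    'root://wan.example.ch//f': {'domain': 'wan', 'priority': 1},
    'root://archive.example.ch//a?xrdcl.unzip=f': {'domain': 'zip', 'priority': 1},
}
ordered = sorted((meta['domain'], meta['priority'], pfn) for pfn, meta in example_pfns.items())
for index, (_domain, _prio, pfn) in enumerate(ordered, start=1):
    example_pfns[pfn]['priority'] = index   # lan -> 1, wan -> 2, zip -> 3
# -----------------------------------------------------------------------------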
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
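# --- Illustrative aside (not part of the original module) --------------------
# __bulk_add_new_file_dids() above translates backend-specific IntegrityError
# messages into typed Rucio exceptions by regex-matching the error text. A
# condensed sketch of that pattern (regexes shortened; the full set is in the
# except-block above, and the helper name is hypothetical):
from re import match
from rucio.common import exception

def translate_integrity_error(error):
    msg = error.args[0]
    if match('.*DIDS_SCOPE_FK.*', msg) or match('.*FOREIGN KEY constraint failed.*', msg):
        return exception.ScopeNotFound('Scope not found!')
    return exception.RucioException(msg)
# -----------------------------------------------------------------------------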
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
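    # The query below selects deletion candidates: replicas at this RSE whose tombstone has
    # expired and which hold no locks, in state AVAILABLE, UNAVAILABLE or BAD (or stuck in
    # BEING_DELETED for longer than delay_seconds), excluding replicas that are still
    # referenced as transfer sources. Matching rows are locked with FOR UPDATE SKIP LOCKED
    # so that concurrent workers do not pick the same candidates.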
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
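    Each element of `replicas` is a dictionary carrying at least 'scope', 'name', 'rse_id'
    and 'state' (optionally 'path', 'error_message', 'broken_rule_id', 'broken_message').
    Illustrative sketch, with placeholder values and an already-open session assumed:

        >>> update_replicas_states(replicas=[{'scope': scope, 'name': 'file_1',
        ...                                   'rse_id': rse_id,
        ...                                   'state': ReplicaState.AVAILABLE}],
        ...                        session=session)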
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
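    Illustrative sketch (placeholder values; assumes the replica and its file DID exist):

        >>> touch_replica({'rse_id': rse_id, 'scope': scope, 'name': 'file_1',
        ...                'accessed_at': datetime.utcnow()})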
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
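    The result maps each (child_scope, child_name) pair of the dataset to the list of
    rse_ids that hold an AVAILABLE replica of that file. Illustrative sketch
    (placeholder values):

        >>> sources = get_source_replicas_for_dataset(scope=scope, name='dataset_1',
        ...                                           source_rses=[rse_id])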
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
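    Illustrative sketch (placeholder values; with deep=False only the pre-computed
    collection replica rows are read, with deep=True the metrics are aggregated at the
    file and archive level):

        >>> for replica in list_dataset_replicas(scope=scope, name='dataset_1', deep=True):
        ...     print(replica['rse'], replica['available_length'], replica['state'])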
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
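    Illustrative sketch (placeholder values; the keys of `names_by_intscope` are internal
    scopes, each pointing at a list of dataset names in that scope):

        >>> names_by_intscope = {scope: ['dataset_1', 'dataset_2']}
        >>> replicas = list(list_dataset_replicas_bulk(names_by_intscope))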
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
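    Illustrative sketch (placeholder values; 'name'/'scope' filters accept '*' or '%'
    wildcards, 'created_before'/'created_after' accept date strings):

        >>> for ds in list_datasets_per_rse(rse_id=rse_id,
        ...                                 filters={'name': 'data16*'}, limit=10):
        ...     print(ds['scope'], ds['name'], ds['available_bytes'])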
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
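    Illustrative sketch (placeholder values): an update request is one row previously
    fetched via get_cleaned_updated_collection_replicas(); its 'rse_id' may be None, in
    which case every collection replica of the dataset is re-evaluated:

        >>> for request in get_cleaned_updated_collection_replicas(total_workers=1,
        ...                                                        worker_number=0):
        ...     update_collection_replica(request)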
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
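    Illustrative sketch (the PFN string is a placeholder):

        >>> bulk_delete_bad_pfns(pfns=['srm://se.example.org/path/file_1'], session=session)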
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
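    Example (illustrative sketch only; the RSE expression and thresholds are made-up
    values, and the session is assumed to be injected by the @read_session decorator):

        >>> from datetime import datetime, timedelta
        >>> get_suspicious_files('MOCK_SCRATCHDISK',
        ...                      younger_than=datetime.utcnow() - timedelta(days=3),
        ...                      nattempts=2,
        ...                      exclude_states=['B', 'R', 'D'])
        [{'scope': ..., 'name': ..., 'rse': 'MOCK_SCRATCHDISK', 'rse_id': ..., 'cnt': ..., 'created_at': ...}]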
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
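A minimal usage sketch for set_tombstone and get_RSEcoverage_of_dataset (illustrative
only; the rse_id, scope and dataset name below are made-up values, and the database
session is assumed to be injected by the decorators):

    # Mark one file replica as deletable; raises ReplicaIsLocked if a replica lock
    # exists, or ReplicaNotFound if the replica is not registered on that RSE.
    set_tombstone(rse_id=some_rse_id, scope=mock_scope, name='file_1')

    # Per-RSE byte coverage of a dataset, returned as {rse_id: total_bytes, ...}.
    coverage = get_RSEcoverage_of_dataset(scope=mock_scope, name='dataset_1')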
add_bad_pfns
Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states: BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True if successful.
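A minimal usage sketch for add_bad_pfns (illustrative only; the PFN, account object
and expiry below are made-up values, and the session is assumed to be injected by
the @transactional_session decorator):

    from datetime import datetime, timedelta

    add_bad_pfns(pfns=['srm://se.example.org/rucio/mock/ab/cd/file_1.root'],
                 account=issuer_account,         # hypothetical account object
                 state='TEMPORARY_UNAVAILABLE',  # string states are mapped to BadPFNStatus
                 reason='Disk server unreachable',
                 expires_at=datetime.utcnow() + timedelta(days=1))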
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
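    :returns: A tuple (exists, scope, name, already_declared, bytes); when no replica
              is found, (False, None, None, already_declared, None) is returned.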
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
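    :returns: A list of strings describing replicas that could not be declared,
              e.g. 'scope:name Already declared' or 'scope:name Unknown replica'.

    Example (illustrative sketch only; the DID, rse_id, reason and issuer below are
    made-up values, and the session is assumed to be injected by the decorator):

        >>> add_bad_dids(dids=[{'scope': 'mock', 'name': 'file_1'}], rse_id=some_rse_id,
        ...              reason='Lost during disk failure', issuer=issuer_account)
        []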
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
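    Example (illustrative sketch only; ``scope`` is an existing InternalScope and the DID below is a placeholder):

        >>> dids = [{'scope': scope, 'name': 'file_1'}]
        >>> for replica in list_replicas(dids=dids, schemes=['root'], session=session):
        ...     print(replica['pfns'])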
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
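    Example of an entry in ``files`` (illustrative; 'md5', 'adler32', 'account' and 'meta' are optional and the values shown are placeholders):

        >>> files = [{'scope': scope, 'name': 'file_1', 'bytes': 10485760, 'adler32': '0cc737eb'}]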
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
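    # Deletion-candidate selection (summary of the query below): pick replicas on this RSE whose
    # tombstone is in the past, that hold no locks and are not registered as transfer sources,
    # and that are either in a deletable state (AVAILABLE, UNAVAILABLE, BAD) or stuck in
    # BEING_DELETED for longer than delay_seconds; rows are locked with SKIP LOCKED and
    # returned ordered by tombstone.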
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
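    Example (illustrative; the dictionary must reference an existing replica, the values are placeholders):

        >>> touch_replica({'scope': scope, 'name': 'file_1', 'rse_id': rse_id})
        True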
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
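    Example (illustrative; the result maps each (child_scope, child_name) in the dataset to the list of RSE ids with an AVAILABLE source replica):

        >>> replicas = get_source_replicas_for_dataset(scope=scope, name='dataset_1')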
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
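    Example (illustrative; one dictionary is yielded per RSE holding a replica of the dataset, the name below is a placeholder):

        >>> for replica in list_dataset_replicas(scope=scope, name='dataset_1', deep=True):
        ...     print(replica['rse'], replica['available_length'], replica['length'])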
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
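    Example (illustrative; keys are InternalScope objects, values are lists of dataset names, the names below are placeholders):

        >>> replicas = list(list_dataset_replicas_bulk({scope: ['dataset_1', 'dataset_2']}))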
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True # MASKED: add_bad_pfns function (lines 3072-3108) @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. :returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. 
e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] """ younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. 
other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
@transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True
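# --- Illustrative usage sketch (not part of the original record) ---
# A minimal, hypothetical call of add_bad_pfns as implemented above, assuming a
# Rucio runtime where `root_account` is a valid InternalAccount and the
# @transactional_session decorator supplies the database session. The PFN and
# reason string are made up for illustration only.
from datetime import datetime, timedelta

example_pfns = ['srm://se.example.org/rucio/user/jdoe/file_1.root']  # hypothetical PFN
add_bad_pfns(pfns=example_pfns,
             account=root_account,             # assumed InternalAccount object
             state='TEMPORARY_UNAVAILABLE',    # string form, resolved via BadPFNStatus[state]
             reason='disk server offline',
             expires_at=datetime.utcnow() + timedelta(hours=24))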
3072
3108
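# --- Illustrative sketch of how this record's fields fit together (an assumption
# about the record layout, not part of the dataset itself) ---
# masked_code replaces lines start_line..end_line (3072-3108) of file_content with
# a "# MASKED: ..." placeholder; splicing the implementation back in at that
# placeholder should recover the original module. The helper name `unmask` is
# hypothetical.
def unmask(masked_code, implementation):
    restored = []
    for line in masked_code.splitlines():
        if line.lstrip().startswith('# MASKED:'):
            restored.extend(implementation.splitlines())
        else:
            restored.append(line)
    return '\n'.join(restored)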
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
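    # An illustrative sketch of the heapq.merge() pattern used at the top of _list_replicas above
    # (standalone example with made-up scope/name tuples, not this module's data):
    #
    #     import heapq
    #
    #     datasets = iter([('scope1', 'a'), ('scope1', 'c')])      # already sorted by (scope, name)
    #     files = iter([('scope1', 'b'), ('scope2', 'a')])         # already sorted by (scope, name)
    #
    #     # heapq.merge keeps only one pending item per input iterator, so the combined,
    #     # globally sorted stream is produced without materializing the inputs in memory
    #     merged = heapq.merge(datasets, files, key=lambda t: (t[0], t[1]))
    #     assert list(merged) == [('scope1', 'a'), ('scope1', 'b'), ('scope1', 'c'), ('scope2', 'a')]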
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
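    # A small standalone sketch of the priority re-numbering trick used above; it relies on the domain
    # labels sorting alphabetically as 'lan' < 'wan' < 'zip' (PFNs and priorities below are made up):
    #
    #     pfns = {'root://lan.example/f': {'domain': 'lan', 'priority': 1},
    #             'root://wan.example/f': {'domain': 'wan', 'priority': 1},
    #             'root://zip.example/f': {'domain': 'zip', 'priority': -1}}
    #     ordered = sorted((attrs['domain'], attrs['priority'], pfn) for pfn, attrs in pfns.items())
    #     for i, (_, _, pfn) in enumerate(ordered):
    #         pfns[pfn]['priority'] = i + 1       # 1-indexed total order, as expected by metalink
    #
    #     # LAN doors rank before WAN doors, which rank before PFNs pointing inside archives ('zip')
    #     assert [p for _, _, p in ordered] == ['root://lan.example/f', 'root://wan.example/f', 'root://zip.example/f']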
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
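    # A hedged usage sketch for list_replicas() defined above (scope, session and the file name are
    # placeholders; 'scope' is assumed to be an InternalScope and 'session' an open database session):
    #
    #     dids = [{'scope': scope, 'name': 'file.root'}]
    #     for replica in list_replicas(dids=dids, schemes=['root'], session=session):
    #         print(replica['scope'], replica['name'], replica['bytes'])
    #         for pfn, attrs in replica['pfns'].items():
    #             # attrs carries 'rse_id', 'rse', 'type', 'volatile', 'domain', 'priority' and 'client_extract'
    #             print('  ', attrs['priority'], pfn)
    #         # replica['rses'] maps rse_id -> list of PFNs and stays empty for files without any replica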
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN: got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns)))

    nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
    increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
    return replicas


@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
    """
    Add File replica.

    :param rse_id: the rse id.
    :param scope: the scope name.
    :param name: The data identifier name.
    :param bytes: the size of the file.
    :param account: The account owner.
    :param md5: The md5 checksum.
    :param adler32: The adler32 checksum.
    :param pfn: Physical file name (for nondeterministic rse).
    :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
    :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
    :param tombstone: If True, create replica with a tombstone.
    :param session: The database session in use.

    :returns: True if successful.
    """
    if meta is None:
        meta = {}

    file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
    if pfn:
        file['pfn'] = pfn
    return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)


@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
    """
    Delete file replicas.

    :param rse_id: the rse id.
    :param files: the list of files to delete.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param session: The database session in use.
    """
    replica_rse = get_rse(rse_id=rse_id, session=session)

    if not (replica_rse.availability & 1) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse)

    replica_condition, src_condition = [], []
    for file in files:
        replica_condition.append(
            and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name']))

        src_condition.append(
            and_(models.Source.scope == file['scope'],
                 models.Source.name == file['name'],
                 models.Source.rse_id == rse_id))

    delta, bytes, rowcount = 0, 0, 0

    # WARNING: This should not be necessary since that would mean the replica is used as a source.
    for chunk in chunks(src_condition, 10):
        rowcount = session.query(models.Source). \
            filter(or_(*chunk)). \
            delete(synchronize_session=False)

    rowcount = 0
    for chunk in chunks(replica_condition, 10):
        for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name,
                                                               models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \
                with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)):
            bytes += replica_bytes
            delta += 1

        rowcount += session.query(models.RSEFileAssociation). \
            filter(models.RSEFileAssociation.rse_id == rse_id). \
            filter(or_(*chunk)).
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
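    # The cleanup above repeatedly splits long OR-clauses with chunks(); a standalone sketch of that
    # batching pattern, assuming the module's chunks() helper behaves like this simple slicer:
    #
    #     def chunks(items, size):
    #         """Yield successive fixed-size slices of a list."""
    #         for i in range(0, len(items), size):
    #             yield items[i:i + size]
    #
    #     # a filter with thousands of OR terms is then issued as several smaller statements,
    #     # keeping each query within database limits on bind variables and clause size, e.g.:
    #     #   for chunk in chunks(did_condition, 100):
    #     #       session.query(models.DataIdentifier).filter(or_(*chunk)).delete(synchronize_session=False)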
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
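    # A hedged usage sketch for list_and_mark_unlocked_replicas() (illustrative values only, as a
    # deletion daemon might call it; rse_id and session are placeholders):
    #
    #     candidates = list_and_mark_unlocked_replicas(limit=100,
    #                                                  bytes=10 * 1024**3,   # stop once ~10 GiB are gathered
    #                                                  rse_id=rse_id,
    #                                                  delay_seconds=600,
    #                                                  session=session)
    #     for replica in candidates:
    #         # each entry carries 'scope', 'name', 'path', 'bytes', 'tombstone' and 'state'; the rows
    #         # returned here were already flipped to BEING_DELETED with an epoch tombstone
    #         print(replica['scope'], replica['name'], replica['bytes'])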
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
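    # A hedged usage sketch for update_replicas_states() (scope, rse_id and session are placeholders):
    #
    #     replicas = [{'scope': scope, 'name': 'file.root', 'rse_id': rse_id, 'state': 'A'}]
    #     # string states are accepted and converted to ReplicaState ('A' stands for AVAILABLE)
    #     update_replicas_states(replicas, nowait=False, session=session)
    #
    #     # marking a replica AVAILABLE also resolves its transfer locks via rucio.core.lock,
    #     # while BEING_DELETED additionally sets an OBSOLETE tombstone and is refused for
    #     # replicas that are still locked or used as transfer sources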
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
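    # A hedged usage sketch for touch_replica() (scope, rse_id and session are placeholders):
    #
    #     touched = touch_replica({'scope': scope,
    #                              'name': 'file.root',
    #                              'rse_id': rse_id,
    #                              'accessed_at': datetime.utcnow()},   # optional, defaults to "now"
    #                             session=session)
    #     if not touched:
    #         # the row is locked by a concurrent transaction (nowait), so the access time was skipped
    #         pass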
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
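        # De-duplicate pending collection-replica update requests on Oracle: within each
        # (scope, name, did_type, rse_id) group (rows with NULL rse_id are grouped together),
        # keep only the row with the smallest rowid and delete every other row of the group.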
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
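The set_tombstone function above only places a tombstone when no ReplicaLock row exists for the replica, using a correlated NOT EXISTS guard inside a single bulk UPDATE, and then re-queries the replica to tell "locked" apart from "not found" when zero rows match. Below is a minimal, self-contained sketch of that guarded-update pattern; it is not Rucio code: the Replica and Lock classes and the in-memory SQLite engine are illustrative stand-ins for models.RSEFileAssociation and models.ReplicaLock, and it assumes SQLAlchemy 1.4+.

from datetime import datetime

from sqlalchemy import Column, DateTime, Integer, String, and_, create_engine, exists
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Replica(Base):
    # stand-in for models.RSEFileAssociation
    __tablename__ = 'replicas'
    rse_id = Column(Integer, primary_key=True)
    scope = Column(String, primary_key=True)
    name = Column(String, primary_key=True)
    tombstone = Column(DateTime, nullable=True)


class Lock(Base):
    # stand-in for models.ReplicaLock
    __tablename__ = 'locks'
    rse_id = Column(Integer, primary_key=True)
    scope = Column(String, primary_key=True)
    name = Column(String, primary_key=True)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Replica(rse_id=1, scope='user.jdoe', name='file1'))
    session.add(Replica(rse_id=1, scope='user.jdoe', name='file2'))
    session.add(Lock(rse_id=1, scope='user.jdoe', name='file2'))  # file2 is locked
    session.commit()

    def try_set_tombstone(name):
        # Update the replica only if no lock row exists for the same (rse_id, scope, name).
        return session.query(Replica).filter(
            and_(Replica.rse_id == 1,
                 Replica.scope == 'user.jdoe',
                 Replica.name == name,
                 ~exists().where(and_(Lock.rse_id == Replica.rse_id,
                                      Lock.scope == Replica.scope,
                                      Lock.name == Replica.name)))
        ).update({Replica.tombstone: datetime.utcnow()}, synchronize_session=False)

    print(try_set_tombstone('file1'))  # 1 -> tombstone set
    print(try_set_tombstone('file2'))  # 0 -> locked (or missing)
    session.commit()

A zero rowcount is ambiguous on its own, which is why the original function follows it with a lookup to decide between raising ReplicaIsLocked and ReplicaNotFound.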
list_expired_temporary_unavailable_replicas
List the expired temporary unavailable replicas

:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: The maximum number of replicas returned.
:param session: The database session in use.
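The total_workers / worker_number parameters rely on hash-based work partitioning: the filter_thread_work helper pushes an equivalent hash-and-modulo condition into the SQL query, with per-dialect variants. Purely as an illustration of that idea, and not the actual helper, here is a small plain-Python sketch that assigns replica names to workers by hashing the name:

from hashlib import sha256


def belongs_to_worker(name, worker_number, total_workers):
    # Deterministically map a name to one of `total_workers` buckets.
    digest = int(sha256(name.encode()).hexdigest(), 16)
    return digest % total_workers == worker_number


names = ['scope:file_%d' % i for i in range(10)]
for worker in range(3):
    assigned = [n for n in names if belongs_to_worker(n, worker, 3)]
    print(worker, assigned)

Every name lands in exactly one worker's bucket, so concurrent daemon threads can poll expired TEMPORARY_UNAVAILABLE replicas without processing the same rows twice.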
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
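    # Illustrative note (added for clarity, not part of the original comment): with
    # nrandom=2 and an input stream (a, b, c, d), the reservoir starts as [a, b];
    # each subsequent element may then overwrite a randomly chosen slot, so the
    # final selection is a size-2 pseudo-random subset of the stream.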
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
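    Illustrative sketch of the consistent-hashing step used below (the filename is a made-up example; any string is mapped into [0, 1) and then matched against the server ranges returned by the VP service):

        >>> from hashlib import sha256
        >>> from struct import unpack
        >>> h = float(unpack('Q', sha256('user.jdoe:file_1'.encode('utf-8')).digest()[:8])[0]) / 2**64
        >>> 0 <= h < 1
        True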
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Exception: {}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combined, sorted result without loading everything into memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all':
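                        # In 'all' mode, collect the preferred read scheme for both the WAN and
                        # the LAN door so that PFNs for both domains can be built below.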
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True # MASKED: list_expired_temporary_unavailable_replicas function (lines 3111-3130) @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. :returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. 
:param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] """ younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. 
other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
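

# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal example, assuming a caller simply wants to report the per-RSE byte
# coverage returned by get_RSEcoverage_of_dataset() above. The function name and the
# print-based reporting are hypothetical; get_rse_name() is the helper already
# imported at the top of this module, and the decorated core functions create their
# own database session when none is passed.
def _example_report_dataset_coverage(scope, name):
    """Print the total bytes of dataset `scope:name` available on each RSE (example only)."""
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name)
    for rse_id, total_bytes in coverage.items():
        print('%s: %d bytes' % (get_rse_name(rse_id=rse_id), total_bytes))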
@read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all()
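

# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal sketch of how a single-worker caller might consume the rows returned by
# list_expired_temporary_unavailable_replicas() above. Running with one worker and
# merely printing the rows are assumptions made for the example; a real daemon would
# re-declare or clean up the expired replicas instead of printing them.
def _example_show_expired_temporary_unavailable(limit=100):
    """Print up to `limit` expired TEMPORARY_UNAVAILABLE replicas (example only)."""
    rows = list_expired_temporary_unavailable_replicas(total_workers=1, worker_number=0, limit=limit)
    for scope, name, rse_id in rows:
        print('%s:%s expired on RSE %s' % (scope, name, rse_id))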
3111
3130
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
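# Illustrative aside (not part of the original module): the day-bucketing in
# get_bad_replicas_summary above switches on the SQL dialect. A minimal standalone helper
# showing the same idea; `day_bucket` and its argument names are invented for this sketch.
from sqlalchemy import func

def day_bucket(column, dialect_name):
    """Return a dialect-appropriate expression truncating `column` to the day."""
    if dialect_name == 'oracle':
        return func.trunc(column, 'DD')
    if dialect_name == 'mysql':
        return func.date(column)
    if dialect_name == 'postgresql':
        return func.date_trunc('day', column)
    # sqlite and anything else: render the timestamp as YYYY-MM-DD
    return func.strftime('%Y-%m-%d', column)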
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
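# Hedged sketch: list_bad_replicas_history above relies on filter_thread_work to give each
# necromancer thread a hash-based slice of the table. A plain-Python analogue of that
# partitioning idea; `belongs_to_thread` is illustrative only, the real filtering happens in SQL.
from hashlib import sha256

def belongs_to_thread(name, thread_id, total_threads):
    """True if the replica `name` falls into the slice assigned to `thread_id`."""
    if not total_threads or total_threads < 2:
        return True
    digest = int(sha256(name.encode('utf-8')).hexdigest(), 16)
    return digest % total_threads == thread_id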
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
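# Hedged sketch of the hidden-character check used above before reporting a PFN as unknown:
# any non-printable character makes the PFN unusable. `contains_hidden_chars` is a name
# introduced for this illustration only.
from curses.ascii import isprint

def contains_hidden_chars(pfn):
    """Return True if the PFN contains a non-printable character."""
    return any(not isprint(char) for char in str(pfn))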
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
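# Hedged sketch of the ATLAS-specific convention flagged by the WARNING comments above: for
# deterministic RSEs the scope is recovered from the leading path components, with /user and
# /group paths using two levels. `scope_from_path` is illustrative only.
def scope_from_path(path, name):
    """Derive (scope, name) from a parsed deterministic path, ATLAS convention."""
    if path.startswith('/user') or path.startswith('/group'):
        scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
    elif path.startswith('/'):
        scope = path.split('/')[1]
    else:
        scope = path.split('/')[0]
    return scope, name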
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
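# Hedged sketch of the container expansion performed in _resolve_dids above: containers are
# walked iteratively with a stack until only datasets remain. `children_of` stands in for the
# CONTENTS query and is an assumption of this example.
def expand_to_datasets(scope, name, children_of):
    """children_of(scope, name) -> iterable of (child_scope, child_name, is_dataset)."""
    datasets, stack = [], [(scope, name)]
    while stack:
        current_scope, current_name = stack.pop()
        for child_scope, child_name, is_dataset in children_of(current_scope, current_name):
            if is_dataset:
                datasets.append((child_scope, child_name))
            else:
                stack.append((child_scope, child_name))
    return datasets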
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
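# Hedged aside: _pick_n_random above uses reservoir sampling so that n replicas can be drawn
# uniformly from a generator without materialising it. The textbook form of the algorithm,
# shown for clarity only: the k-th element replaces a random slot with probability n/k, which
# keeps every element equally likely to end up selected.
import random

def reservoir_sample(iterable, n):
    reservoir = []
    for k, element in enumerate(iterable, start=1):
        if k <= n:
            reservoir.append(element)
        else:
            j = random.randrange(k)  # uniform over [0, k-1]
            if j < n:
                reservoir[j] = element
    return reservoir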
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
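# Hedged sketch of the chunking pattern used by _list_replicas_for_files above: very long
# OR(...) filters are split into fixed-size slices (50 conditions per statement) so a single
# query never grows unbounded. Same idea as rucio.common.utils.chunks; `chunked` is a local
# illustrative name.
def chunked(items, size):
    """Yield successive `size`-sized slices of the list `items`."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

# usage sketch: for condition_slice in chunked(file_clause, 50): query.filter(or_(*condition_slice))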
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
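# Hedged sketch of the cache-node selection in get_multi_cache_prefix above: the filename is
# hashed into [0, 1) via the first 8 bytes of its SHA-256 digest and looked up in the site's
# cumulative ranges, giving a deterministic server choice. The data layout mirrors the
# structure read from the VP endpoint but is an assumption of this example.
from hashlib import sha256
from struct import unpack

def pick_cache_server(filename, ranges, servers):
    """ranges: [(server_index, upper_bound), ...] with increasing upper bounds in (0, 1]."""
    h = float(unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64
    for server_index, upper_bound in ranges:
        if h < upper_bound:
            return servers[server_index][0]
    return ''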
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
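# Hedged sketch of the scheme->priority maps built above: in an RSE protocol definition a
# read priority of 0 means the protocol is disabled for that domain, so it is skipped.
# `read_priorities` is a name introduced here; the input is the 'protocols' list returned by
# rsemgr.get_rse_info().
def read_priorities(protocols, domain):
    """Map scheme -> read priority for one domain ('lan' or 'wan'), ignoring disabled entries."""
    return {p['scheme']: p['domains'][domain]['read']
            for p in protocols
            if p['domains'][domain]['read'] > 0}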
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
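# Hedged sketch of the total ordering applied above: sorting on (domain, priority) works
# because 'lan' < 'wan' < 'zip' alphabetically, and ranks are rewritten 1-indexed for
# metalink compatibility. `renumber_priorities` is illustrative, not the module's API.
def renumber_priorities(pfns):
    """pfns: {pfn: {'domain': ..., 'priority': ..., ...}} -- priorities rewritten in place."""
    ordered = sorted((meta['domain'], meta['priority'], pfn) for pfn, meta in pfns.items())
    for rank, (_, _, pfn) in enumerate(ordered, start=1):
        pfns[pfn]['priority'] = rank
    return pfns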
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
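# Hedged aside: the loop in _list_replicas above accumulates sorted rows into one dict per
# (scope, name) and flushes it whenever the key changes. itertools.groupby expresses the same
# grouping over a pre-sorted iterable; shown only to clarify the control flow, it is not the
# code path actually used.
from itertools import groupby

def group_rows_by_did(sorted_rows):
    """sorted_rows: iterable of tuples whose first two fields are (scope, name), pre-sorted."""
    for (scope, name), rows in groupby(sorted_rows, key=lambda row: (row[0], row[1])):
        yield {'scope': scope, 'name': name, 'rows': list(rows)}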
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
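# Hedged sketch of the error translation in __bulk_add_new_file_dids above: backend-specific
# IntegrityError texts are matched with regular expressions and mapped onto Rucio exceptions.
# The pattern list here is abbreviated and `translate_integrity_error` is an illustrative name.
from re import match
from rucio.common import exception

def translate_integrity_error(message):
    """Return the Rucio exception class corresponding to a raw IntegrityError message."""
    scope_missing_patterns = (
        r'.*DIDS_SCOPE_FK.*violated - parent key not found.*',
        r'.*FOREIGN KEY constraint failed.*',
    )
    if any(match(pattern, message) for pattern in scope_missing_patterns):
        return exception.ScopeNotFound
    return exception.RucioException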
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN: got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for non-deterministic RSEs). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True if successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING: This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)).
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
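    # The query below picks deletion candidates on the given RSE: replicas whose tombstone has
    # passed, that hold no locks, are not registered as transfer sources, and are either in
    # state AVAILABLE/UNAVAILABLE/BAD or stuck in BEING_DELETED for longer than delay_seconds.
    # Matching rows are locked with FOR UPDATE SKIP LOCKED, ordered by tombstone, and afterwards
    # marked BEING_DELETED with an epoch tombstone. Illustrative call (argument values are assumptions):
    #   rows = list_and_mark_unlocked_replicas(limit=100, bytes=10**12, rse_id=rse_id)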
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas used as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if the row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise.
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get source replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param source_rses: Possible RSE_ids to filter on. :param session: The db session in use.
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
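        # The raw statement below de-duplicates UpdatedCollectionReplica rows on Oracle: for every
        # group sharing scope, name and did_type (with equal or both-NULL rse_id) only the row with
        # the smallest ROWID is kept. The MySQL branch that follows keeps one row per
        # (scope, name, rse_id) via a grouped subquery, and the generic branch de-duplicates in Python.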
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
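A minimal usage sketch for the two helpers defined above (get_RSEcoverage_of_dataset and set_tombstone). It assumes this module is importable as rucio.core.replica and that a Rucio database is configured (the @read_session/@transactional_session decorators then supply the session when none is passed); the scope, dataset, file and RSE identifiers below are placeholders, not values taken from this file.

from rucio.common import exception
from rucio.common.types import InternalScope
from rucio.core.replica import get_RSEcoverage_of_dataset, set_tombstone


def sketch_tombstone_file_on_covered_rse(rse_id, scope_name, dataset_name, file_name, vo='def'):
    """Sketch: if the dataset holds any bytes on rse_id, try to tombstone one of its files there."""
    scope = InternalScope(scope_name, vo=vo)
    # {rse_id: total bytes} for replicas of the dataset that are not being deleted
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=dataset_name)
    if coverage.get(rse_id, 0) > 0:
        try:
            # tombstone defaults to OBSOLETE; locked or missing replicas raise instead
            set_tombstone(rse_id=rse_id, scope=scope, name=file_name)
        except (exception.ReplicaIsLocked, exception.ReplicaNotFound) as err:
            print('Could not set tombstone on %s:%s at %s: %s' % (scope, file_name, rse_id, err))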
get_replicas_state
Method used by the necromancer to get all the replicas of a DID :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. :returns: A dictionary with the replica states as keys and the lists of rse_ids as values
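A short, hedged sketch of how the returned dictionary might be consumed, assuming the function is importable as rucio.core.replica.get_replicas_state, a database is configured, and the 'mock' scope and file name are placeholders.

from rucio.common.types import InternalScope
from rucio.core.replica import get_replicas_state
from rucio.db.sqla.constants import ReplicaState

# e.g. {ReplicaState.AVAILABLE: [rse_id_a, rse_id_b], ReplicaState.UNAVAILABLE: [rse_id_c]}
states = get_replicas_state(scope=InternalScope('mock'), name='file_1')
available_on = states.get(ReplicaState.AVAILABLE, [])
print('%d RSE(s) hold an AVAILABLE copy' % len(available_on))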
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
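                    # Any DIDAvailability value not handled above still resolves to
                    # BadFilesStatus.DELETED below, so the bad_replicas row is closed out
                    # rather than left dangling.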
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
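    # Concretely: once the first `nrandom` items fill `selected`, the m-th item seen
    # overall is swapped in with probability nrandom / (m + 1) (random.randint is
    # inclusive of both bounds), so memory stays bounded at `nrandom` elements while
    # the generator is still consumed in full.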
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
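                # Added note: the Oracle branch above only schedules DidMeta rows for deletion on
                # Oracle >= 12, presumably because the JSON-backed did_meta table cannot be used on
                # older Oracle releases; every other database dialect falls through to this branch.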
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
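    # Explanatory comment (added, based on reading the query below): the Oracle hint requests an
    # ascending range scan of REPLICAS_TOMBSTONE_IDX and forbids a fast full scan of it, and the
    # case(tombstone IS NOT NULL -> rse_id) == rse_id filter appears to mirror that functional
    # index definition so the index can actually be picked. `none_value` only exists because the
    # `!= None` comparison SQLAlchemy needs here would otherwise upset pep8 (E711).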
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
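
    Each replica dictionary is expected to carry at least 'rse_id', 'scope', 'name' and 'state'
    (optionally 'path', 'error_message', 'broken_rule_id', 'broken_message'); string states such
    as 'A' are converted to ReplicaState internally.

    Illustrative usage (a sketch, not from the original source; `rse_id`, `scope` (an
    InternalScope) and `session` are assumed to already exist, and 'file_1' is hypothetical):

        >>> update_replicas_states([{'rse_id': rse_id, 'scope': scope,
        ...                          'name': 'file_1', 'state': 'A'}],
        ...                        session=session)
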
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
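
    Example (an illustrative sketch, not from the original source; the replica values shown are
    hypothetical, and True is only returned if the row exists and is not locked):

        >>> touch_replica({'rse_id': rse_id, 'scope': scope, 'name': 'file_1',
        ...                'accessed_at': datetime.utcnow()}, session=session)
        True
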
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
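        # The raw SQL below keeps, for every (scope, name, did_type, rse_id) group of update
        # requests, only the row with the smallest ROWID and deletes the other duplicates
        # (NULL rse_id values are treated as equal for this comparison).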
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() # MASKED: get_replicas_state function (lines 3133-3151) @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. 
Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] """ younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = 
query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
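# Illustrative sketch only (not part of the original module): one way a caller might turn the
# per-RSE byte totals returned by get_RSEcoverage_of_dataset() into fractional coverage.
# The helper name and its scope/name/session parameters are assumptions made for this example.
def _example_dataset_coverage_fractions(scope, name, session=None):
    """Return {rse_id: fraction_of_dataset_bytes} for a dataset (illustrative helper)."""
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name, session=session)
    total_bytes = sum(coverage.values())
    if not total_bytes:
        return {}
    return {rse_id: nbytes / float(total_bytes) for rse_id, nbytes in coverage.items()}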
@read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. :returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states
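# Illustrative usage sketch only (an assumption, not part of the original module): the mapping
# returned by get_replicas_state() is keyed by ReplicaState, so a caller can directly look up
# the RSEs holding an AVAILABLE copy of the file. The helper name is hypothetical.
def _example_available_rse_ids(scope, name, session=None):
    """Return the list of rse_ids with an AVAILABLE replica of scope:name (illustrative)."""
    states = get_replicas_state(scope=scope, name=name, session=session)
    return states.get(ReplicaState.AVAILABLE, [])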
3133
3151
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
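                    # Defensive fallback: any availability value other than LOST/DELETED/AVAILABLE
                    # is also mapped to DELETED below.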
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
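    # Note on the loop below: the reservoir is filled with the first `nrandom` elements; each later
    # element (the i-th one seen) then overwrites a uniformly chosen slot with probability
    # nrandom / (i + 1), because random.randint(0, i) is inclusive of both bounds.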
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
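    The mapping is hash based: the per-site server ranges are fetched once from the VP endpoint
    (and memoised in REGION), the filename is hashed with sha256 onto the interval [0, 1), and the
    first range whose upper bound exceeds the hash value selects the cache server that is returned.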
    :param cache_site: Cache site
    :param filename: Filename
    """
    vp_endpoint = get_vp_endpoint()
    if not vp_endpoint:
        return ''

    x_caches = REGION.get('CacheSites')
    if x_caches is NO_VALUE:
        try:
            response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False)
            if response.ok:
                x_caches = response.json()
                REGION.set('CacheSites', x_caches)
            else:
                REGION.set('CacheSites', {'could not reload': ''})
                return ''
        except requests.exceptions.RequestException as re:
            REGION.set('CacheSites', {'could not reload': ''})
            logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Exception: {}'.format(vp_endpoint, re))
            return ''

    if cache_site not in x_caches:
        return ''

    xcache_site = x_caches[cache_site]
    h = float(unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64
    for irange in xcache_site['ranges']:
        if h < irange[1]:
            return xcache_site['servers'][irange[0]][0]
    return ''


def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain,
                   sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session):

    # iterator which merges multiple sorted replica sources into one combined sorted result without loading everything into memory
    replicas = heapq.merge(
        _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session),
        _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session),
        _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session),
        key=lambda t: (t[0], t[1]),  # sort by scope, name
    )

    # we need to retain knowledge of the original domain selection by the user
    # in case we have to loop over replicas with a potential outgoing proxy
    original_domain = deepcopy(domain)

    # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None)
    local_rses = []
    if domain is None:
        if client_location and 'site' in client_location and client_location['site']:
            try:
                local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)]
            except Exception:
                pass  # do not hard fail if site cannot be resolved or is empty

    file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {}

    for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas:

        pfns = []

        # reset the domain selection to original user's choice (as this could get overwritten each iteration)
        domain = deepcopy(original_domain)

        if show_pfns and rse_id:
            if rse_id not in rse_info:
                rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session)

            # assign scheme priorities, and don't forget to exclude disabled protocols
            # 0 in RSE protocol definition = disabled, 1 = highest priority
            rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0}
            rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0}

            # select the lan door in autoselect mode, otherwise use the wan door
            if domain is None:
                domain = 'wan'
                if local_rses and rse_id in local_rses:
                    domain = 'lan'

            if rse_id not in tmp_protocols:

                rse_schemes = schemes or []
                if not rse_schemes:
                    try:
                        if domain == 'all':
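                            # 'all' resolves the preferred read scheme for both doors (wan and lan);
                            # otherwise only the scheme of the selected domain is used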
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
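
# ----------------------------------------------------------------------------------------------
# Illustrative sketch only, not used by the module itself: _list_replicas above consumes several
# per-source generators that are each sorted by (scope, name) and relies on heapq.merge to obtain
# a single globally sorted stream, so that all rows belonging to one file arrive consecutively and
# can be folded into a single result dict without buffering everything in memory. The helper below
# demonstrates that pattern on plain tuples; the names `sources` and `merged_rows_by_file` are
# invented for this example.
def merged_rows_by_file(sources):
    """Yield ((scope, name), rows) groups from several row sources sorted by (scope, name)."""
    import heapq
    from itertools import groupby

    merged = heapq.merge(*sources, key=lambda row: (row[0], row[1]))
    for key, rows in groupby(merged, key=lambda row: (row[0], row[1])):
        yield key, list(rows)

# Example:
#   a = [('scope1', 'file1', 'RSE_A'), ('scope1', 'file2', 'RSE_A')]
#   b = [('scope1', 'file1', 'RSE_B')]
#   list(merged_rows_by_file([a, b]))
#   -> [(('scope1', 'file1'), [('scope1', 'file1', 'RSE_A'), ('scope1', 'file1', 'RSE_B')]),
#       (('scope1', 'file2'), [('scope1', 'file2', 'RSE_A')])]
# ----------------------------------------------------------------------------------------------
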
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \
                or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]):
            raise exception.Duplicate("File replica already exists!")
        raise exception.RucioException(error.args)
    except DatabaseError as error:
        raise exception.RucioException(error.args)


@transactional_session
def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None):
    """
    Bulk add file replicas.

    :param rse_id: The RSE id.
    :param files: The list of files.
    :param account: The account owner.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param session: The database session in use.

    :returns: True if successful.
    """
    def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None):
        p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr)
        expected_pfns = p.lfns2pfns(lfns)
        return clean_surls(expected_pfns.values())

    replica_rse = get_rse(rse_id=rse_id, session=session)

    if replica_rse.volatile is True:
        raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse))

    if not (replica_rse.availability & 2) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse)

    replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session)

    pfns, scheme = {}, None  # {scheme: [pfns], scheme: [pfns]}
    for file in files:
        if 'pfn' not in file:
            if not replica_rse.deterministic:
                raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse))
        else:
            scheme = file['pfn'].split(':')[0]
            pfns.setdefault(scheme, []).append(file['pfn'])

    if pfns:
        rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session)
        for scheme in pfns.keys():
            if not replica_rse.deterministic:
                p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme)
                pfns[scheme] = p.parse_pfns(pfns=pfns[scheme])
                for file in files:
                    if file['pfn'].startswith(scheme):
                        tmp = pfns[scheme][file['pfn']]
                        file['path'] = ''.join([tmp['path'], tmp['name']])
            else:
                # Check that the pfns match to the expected pfns
                lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)]
                pfns[scheme] = clean_surls(pfns[scheme])

                # Check wan first
                found_on_wan = False
                available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan')
                expected_pfns_wan = None
                for protocol_attr in available_wan_protocols:
                    pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr)
                    if not expected_pfns_wan and pfns_wan_buffer:
                        expected_pfns_wan = pfns_wan_buffer
                    found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme])
                    if found_on_wan:
                        break

                if not found_on_wan:
                    # Check lan
                    found_on_lan = False
                    available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan')
                    for protocol_attr in available_lan_protocols:
                        pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr)
                        found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme])
                        if found_on_lan:
                            break

                    if found_on_lan:
                        # Registration always with wan
                        pfns[scheme] = expected_pfns_wan
                    else:
                        raise exception.InvalidPath('One of the PFNs provided does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns)))
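    # At this point every provided PFN has either been parsed into a path (non-deterministic RSE)
    # or validated against the expected lan/wan form, so the replicas can be registered and the
    # RSE usage counter increased.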
    nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
    increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
    return replicas


@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
    """
    Add File replica.

    :param rse_id: the rse id.
    :param scope: the scope name.
    :param name: The data identifier name.
    :param bytes: the size of the file.
    :param account: The account owner.
    :param md5: The md5 checksum.
    :param adler32: The adler32 checksum.
    :param pfn: Physical file name (for nondeterministic rse).
    :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
    :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
    :param tombstone: If True, create replica with a tombstone.
    :param session: The database session in use.

    :returns: True if successful.
    """
    if meta is None:
        meta = {}

    file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
    if pfn:
        file['pfn'] = pfn
    return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)


@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
    """
    Delete file replicas.

    :param rse_id: the rse id.
    :param files: the list of files to delete.
    :param ignore_availability: Ignore the RSE blocklisting.
    :param session: The database session in use.
    """
    replica_rse = get_rse(rse_id=rse_id, session=session)

    if not (replica_rse.availability & 1) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for deleting' % replica_rse.rse)

    replica_condition, src_condition = [], []
    for file in files:
        replica_condition.append(
            and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name']))

        src_condition.append(
            and_(models.Source.scope == file['scope'],
                 models.Source.name == file['name'],
                 models.Source.rse_id == rse_id))

    delta, bytes, rowcount = 0, 0, 0

    # WARNING : This should not be necessary since that would mean the replica is used as a source.
    for chunk in chunks(src_condition, 10):
        rowcount = session.query(models.Source). \
            filter(or_(*chunk)). \
            delete(synchronize_session=False)

    rowcount = 0
    for chunk in chunks(replica_condition, 10):
        for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name,
                                                               models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \
                with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)):
            bytes += replica_bytes
            delta += 1

        rowcount += session.query(models.RSEFileAssociation). \
            filter(models.RSEFileAssociation.rse_id == rse_id). \
            filter(or_(*chunk)).
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
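    # The query below pre-selects deletion candidates on this RSE: replicas whose tombstone is
    # already in the past, which hold no locks, which are not registered as a transfer source,
    # and which are either in AVAILABLE/UNAVAILABLE/BAD state or stuck in BEING_DELETED for more
    # than `delay_seconds`. Rows are locked with FOR UPDATE SKIP LOCKED so that concurrent
    # workers do not pick the same replicas.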
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
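    Depending on the target state, the related bookkeeping is also performed: AVAILABLE and
    UNAVAILABLE transitions are propagated to the lock accounting via rucio.core.lock, while
    BEING_DELETED is only applied to unlocked replicas that are not registered as a transfer
    source (and sets an OBSOLETE tombstone).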
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update requests for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum number to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.'
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
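As an aside, here is a minimal usage sketch (not part of replica.py) of how a caller might turn the per-RSE byte map returned by get_RSEcoverage_of_dataset above into coverage fractions. The coverage_fractions helper and the dataset_bytes argument are assumptions for illustration only; the import path assumes this module is exposed as rucio.core.replica.

# Hypothetical caller-side helper; get_RSEcoverage_of_dataset is the function defined above.
from rucio.core.replica import get_RSEcoverage_of_dataset

def coverage_fractions(scope, name, dataset_bytes, session=None):
    # get_RSEcoverage_of_dataset returns {rse_id: total bytes of the dataset present on that RSE}
    coverage = get_RSEcoverage_of_dataset(scope=scope, name=name, session=session)
    if not dataset_bytes:
        return {}
    # Express each RSE's share as a fraction of the full dataset size, which the caller
    # is assumed to have computed separately (e.g. from the dataset metadata).
    return {rse_id: total / float(dataset_bytes) for rse_id, total in coverage.items()}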
set_tombstone
Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. Default is OBSOLETE :param session: database session in use.
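For reference, a minimal usage sketch of set_tombstone based on the implementation shown earlier; it assumes the function is importable from rucio.core.replica and simply mirrors the ReplicaIsLocked / ReplicaNotFound exceptions it raises. The try_set_tombstone wrapper is hypothetical.

# Minimal sketch, assuming rucio.core.replica exposes set_tombstone as defined above.
from rucio.common import exception
from rucio.core.replica import set_tombstone
from rucio.db.sqla.constants import OBSOLETE

def try_set_tombstone(rse_id, scope, name):
    try:
        # Marks the replica eligible for deletion; refused when a ReplicaLock row exists.
        set_tombstone(rse_id=rse_id, scope=scope, name=name, tombstone=OBSOLETE)
        return True
    except exception.ReplicaIsLocked:
        # The replica exists but is locked, so no tombstone was set.
        return False
    except exception.ReplicaNotFound:
        # No replica matches the given rse_id/scope/name.
        return False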
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
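    # Illustrative note (added commentary): in the textbook "Algorithm R" the i-th element seen
    # (1-indexed) replaces a random slot with probability nrandom / i, which makes every element
    # equally likely to end up in the final sample. Here random.randint(0, i) is inclusive of i,
    # so the acceptance probability for the element just read is nrandom / (i + 1).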
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
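    The mapping follows the server ranges published by the VP service: the filename is hashed
    with sha256, the first 8 bytes are mapped onto [0, 1), and the cache site's 'ranges' table
    selects which server address to return. An empty string is returned when no VP endpoint is
    configured or the cache site is unknown.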
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for nondeterministic rse). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True is successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable' 'for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
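    # The query below selects deletion candidates on this RSE: replicas whose tombstone has
    # expired and which hold no locks, and which are either in a deletable state (AVAILABLE,
    # UNAVAILABLE, BAD) or have been stuck in BEING_DELETED for longer than delay_seconds.
    # Replicas still referenced in the sources table are excluded, and the selected rows are
    # locked with FOR UPDATE SKIP LOCKED so that concurrent reapers do not pick the same
    # candidates. The case() comparison against rse_id presumably mirrors the function-based
    # REPLICAS_TOMBSTONE_IDX index on Oracle.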
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
        session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema))
    elif session.bind.dialect.name == 'mysql':
        subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\
            group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery()
        subquery2 = session.query(subquery1.c.max_id).subquery()
        session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False)
    else:
        replica_update_requests = session.query(models.UpdatedCollectionReplica)
        update_requests_with_rse_id = []
        update_requests_without_rse_id = []
        duplicate_request_ids = []
        for update_request in replica_update_requests.all():
            if update_request.rse_id is not None:
                small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id}
                if small_request not in update_requests_with_rse_id:
                    update_requests_with_rse_id.append(small_request)
                else:
                    duplicate_request_ids.append(update_request.id)
                    continue
            else:
                small_request = {'name': update_request.name, 'scope': update_request.scope}
                if small_request not in update_requests_without_rse_id:
                    update_requests_without_rse_id.append(small_request)
                else:
                    duplicate_request_ids.append(update_request.id)
                    continue
        for chunk in chunks(duplicate_request_ids, 100):
            session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False)

    query = session.query(models.UpdatedCollectionReplica)
    if limit:
        query = query.limit(limit)
    return [update_request.to_dict() for update_request in query.all()]


@transactional_session
def update_collection_replica(update_request, session=None):
    """
    Update a collection replica.

    :param update_request: update request from the updated_col_rep table.
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result # MASKED: set_tombstone function (lines 3250-3283) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. 
:return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
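
# A minimal usage sketch (not part of the original module) for the coverage helper above;
# the scope and dataset name are hypothetical placeholders.
#
#     coverage = get_RSEcoverage_of_dataset(scope=InternalScope('mock'), name='dataset_1')
#     total = sum(coverage.values())
#     for rse_id, nbytes in sorted(coverage.items(), key=lambda item: item[1], reverse=True):
#         print(rse_id, nbytes, '%.1f%%' % (100.0 * nbytes / total))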
@transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
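
# A minimal usage sketch (not part of the original code) for set_tombstone above; the
# identifiers are hypothetical placeholders.
#
#     try:
#         set_tombstone(rse_id=rse_id, scope=scope, name='file_1', tombstone=OBSOLETE)
#     except exception.ReplicaIsLocked:
#         pass  # a replica lock exists, so no tombstone was set
#     except exception.ReplicaNotFound:
#         pass  # no such replica on this RSE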
3250
3283
# -*- coding: utf-8 -*- # Copyright 2013-2021 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Authors: # - Vincent Garonne <[email protected]>, 2013-2018 # - Cedric Serfon <[email protected]>, 2013-2020 # - Ralph Vigne <[email protected]>, 2013-2014 # - Martin Barisits <[email protected]>, 2013-2021 # - Mario Lassnig <[email protected]>, 2014-2021 # - David Cameron <[email protected]>, 2014 # - Thomas Beermann <[email protected]>, 2014-2021 # - Wen Guan <[email protected]>, 2014-2015 # - Hannes Hansen <[email protected]>, 2018-2019 # - Dimitrios Christidis <[email protected]>, 2019-2021 # - Robert Illingworth <[email protected]>, 2019 # - James Perry <[email protected]>, 2019 # - Jaroslav Guenther <[email protected]>, 2019 # - Andrew Lister <[email protected]>, 2019 # - Ilija Vukotic <[email protected]>, 2020-2021 # - Brandon White <[email protected]>, 2019 # - Tomas Javurek <[email protected]>, 2020 # - Luc Goossens <[email protected]>, 2020 # - Eli Chadwick <[email protected]>, 2020 # - Patrick Austin <[email protected]>, 2020 # - Eric Vaandering <[email protected]>, 2020-2021 # - Benedikt Ziemons <[email protected]>, 2020-2021 # - Radu Carpa <[email protected]>, 2021 # - Gabriele Fronzé <[email protected]>, 2021 from __future__ import print_function import heapq import logging import random from collections import defaultdict from copy import deepcopy from curses.ascii import isprint from datetime import datetime, timedelta from hashlib import sha256 from json import dumps from re import match from struct import unpack from traceback import format_exc import requests from dogpile.cache import make_region from dogpile.cache.api import NO_VALUE from six import string_types from sqlalchemy import func, and_, or_, exists, not_ from sqlalchemy.exc import DatabaseError, IntegrityError from sqlalchemy.orm import aliased from sqlalchemy.orm.exc import FlushError, NoResultFound from sqlalchemy.sql import label from sqlalchemy.sql.expression import case, select, text, false, true import rucio.core.did import rucio.core.lock from rucio.common import exception from rucio.common.types import InternalScope from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query from rucio.core.config import get as config_get from rucio.core.credential import get_signed_url from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses from rucio.core.rse_counter import decrease, increase from rucio.core.rse_expression_parser import parse_expression from rucio.db.sqla import models, filter_thread_work from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability, BadFilesStatus, RuleState, BadPFNStatus) from rucio.db.sqla.session import (read_session, stream_session, transactional_session, DEFAULT_SCHEMA_NAME, BASE) from rucio.rse import rsemanager as rsemgr REGION = make_region().configure('dogpile.cache.memory', expiration_time=60) @read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, 
session=None): """ List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use. """ result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) elif filter: # Ensure we limit results to current VO even if we don't specify an RSE expression for rse in list_rses(filters=filter, session=session): rse_clause.append(models.BadReplicas.rse_id == rse['id']) if session.bind.dialect.name == 'oracle': to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif session.bind.dialect.name == 'mysql': to_days = func.date(models.BadReplicas.created_at) elif session.bind.dialect.name == 'postgresql': to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) # To be added : HINTS if rse_clause != []: query = query.filter(or_(*rse_clause)) if from_date: query = query.filter(models.BadReplicas.created_at > from_date) if to_date: query = query.filter(models.BadReplicas.created_at < to_date) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if (row[2], row[1], row[4]) not in incidents: incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result @read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): """ Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use. 
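:returns: A tuple (exists, scope, name, already_declared, size); when no replica matches, (False, None, None, already_declared, None) is returned.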
""" already_declared = False if path: path_clause = [models.RSEFileAssociation.path == path] if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle').\ filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\ filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() path, scope, name, rse_id, size = result # Now we check that the replica is not already declared bad query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\ filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return True, scope, name, already_declared, size else: return False, None, None, already_declared, None @read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): """ List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use. """ result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter(models.BadReplicas.state == state) if rse_id: query = query.filter(models.BadReplicas.rse_id == rse_id) if younger_than: query = query.filter(models.BadReplicas.created_at >= younger_than) if older_than: query = query.filter(models.BadReplicas.created_at <= older_than) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if badfile.scope.vo == vo: if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if rse_id in rep['rses'] and rep['rses'][rse_id]: pfn = rep['rses'][rse_id][0] if pfn and pfn not in reps: reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result @read_session def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None): """ List the bad file replicas history. 
Method only used by necromancer :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name') query = query.limit(limit) bad_replicas = {} for scope, name, rse_id in query.yield_per(1000): if rse_id not in bad_replicas: bad_replicas[rse_id] = [] bad_replicas[rse_id].append({'scope': scope, 'name': name}) return bad_replicas @transactional_session def update_bad_replicas_history(dids, rse_id, session=None): """ Update the bad file replicas history. Method only used by necromancer :param dids: The list of DIDs. :param rse_id: The rse_id. :param session: The database session in use. """ for did in dids: # Check if the replica is still there try: result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one() state = result.state if state == ReplicaState.AVAILABLE: # If yes, and replica state is AVAILABLE, update BadReplicas query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False) elif state != ReplicaState.BAD: # If the replica state is not AVAILABLE check if other replicas for the same file are still there. try: session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one() except NoResultFound: # No replicas are available for this file. Reset the replica state to BAD update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session) session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False) else: # Here that means that the file has not been processed by the necro. Just pass pass except NoResultFound: # We end-up here if the replica is not registered anymore on the RSE try: result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one() # If yes, the final state depends on DIDAvailability state = result.availability final_state = None if state == DIDAvailability.LOST: final_state = BadFilesStatus.LOST elif state == DIDAvailability.DELETED: final_state = BadFilesStatus.DELETED elif state == DIDAvailability.AVAILABLE: final_state = BadFilesStatus.DELETED else: # For completness, it shouldn't happen. 
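# Recap of the mapping applied in this branch: DIDAvailability.LOST maps to BadFilesStatus.LOST, while DELETED, AVAILABLE and any unexpected value fall back to BadFilesStatus.DELETED.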
print('Houston we have a problem.') final_state = BadFilesStatus.DELETED query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False) except NoResultFound: # If no, the replica is marked as LOST in BadFilesStatus query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name']) query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False) @transactional_session def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: Either BAD or SUSPICIOUS. :param scheme: The scheme of the PFNs. :param session: The database session in use. """ unknown_replicas = [] declared_replicas = [] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) replicas = [] proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: # WARNING : this part is ATLAS specific and must be changed path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo=issuer.vo) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD: # For BAD file, we modify the replica state, not for suspicious try: # there shouldn't be any exceptions since all replicas exist update_replicas_states(replicas, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") else: path_clause = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) __exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session) if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS): replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': 
ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) declared_replicas.append(pfn) path_clause.append(models.RSEFileAssociation.path == path) if path.startswith('/'): path_clause.append(models.RSEFileAssociation.path == path[1:]) else: path_clause.append(models.RSEFileAssociation.path == '/%s' % path) else: if already_declared: unknown_replicas.append('%s %s' % (pfn, 'Already declared')) else: no_hidden_char = True for char in str(pfn): if not isprint(char): unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars')) no_hidden_char = False break if no_hidden_char: unknown_replicas.append('%s %s' % (pfn, 'Unknown replica')) if status == BadFilesStatus.BAD and declared_replicas != []: # For BAD file, we modify the replica state, not for suspicious query = session.query(models.RSEFileAssociation) \ .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX", 'oracle') \ .filter(models.RSEFileAssociation.rse_id == rse_id) \ .filter(or_(*path_clause)) rowcount = query.update({'state': ReplicaState.BAD}) if rowcount != len(declared_replicas): # there shouldn't be any exceptions since all replicas exist print(rowcount, len(declared_replicas), declared_replicas) raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param dids: The list of DIDs. :param rse_id: The RSE id. :param reason: The reason of the loss. :param issuer: The issuer account. :param state: BadFilesStatus.BAD :param session: The database session in use. 
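Each DID is expected to be a dictionary carrying at least 'scope' and 'name'.
:returns: The list of replicas that could not be declared, as strings of the form '<scope>:<name> <reason>'.

Example (illustrative only; the `rse_id` value and the `root` issuer account are assumed to exist):
    >>> not_declared = add_bad_dids([{'scope': 'mock', 'name': 'file_1'}], rse_id=rse_id, reason='lost during migration', issuer=root)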
""" unknown_replicas = [] replicas_for_update = [] for did in dids: scope = InternalScope(did['scope'], vo=issuer.vo) name = did['name'] replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session) if replica_exists and not already_declared: replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD}) new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state, account=issuer, bytes=size) new_bad_replica.save(session=session, flush=False) session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False) else: if already_declared: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared')) else: unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica')) if state == BadFilesStatus.BAD: try: update_replicas_states(replicas_for_update, session=session) except exception.UnsupportedOperation: raise exception.ReplicaNotFound("One or several replicas don't exist.") try: session.flush() except (IntegrityError, DatabaseError, FlushError) as error: raise exception.RucioException(error.args) return unknown_replicas @transactional_session def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None): """ Declare a list of bad replicas. :param pfns: The list of PFNs. :param reason: The reason of the loss. :param issuer: The issuer account. :param status: The status of the file (SUSPICIOUS or BAD). :param session: The database session in use. """ scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session) for rse_id in files_to_declare: notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session) if notdeclared: unknown_replicas[rse_id] = notdeclared return unknown_replicas @read_session def get_pfn_to_rse(pfns, vo='def', session=None): """ Get the RSE associated to a list of PFNs. :param pfns: The list of pfn. :param vo: The VO to find RSEs at. :param session: The database session in use. :returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}. 
""" unknown_replicas = {} storage_elements = [] se_condition = [] dict_rse = {} surls = clean_surls(pfns) scheme = surls[0].split(':')[0] if surls else None for surl in surls: if surl.split(':')[0] != scheme: raise exception.InvalidType('The PFNs specified must have the same protocol') split_se = surl.split('/')[2].split(':') storage_element = split_se[0] if storage_element not in storage_elements: storage_elements.append(storage_element) se_condition.append(models.RSEProtocols.hostname == storage_element) query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\ filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false()) protocols = {} for rse_id, protocol, hostname, port, prefix in query.yield_per(10000): protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix)) hint = None for surl in surls: if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1): dict_rse[hint].append(surl) else: mult_rse_match = 0 for rse_id in protocols: if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo: mult_rse_match += 1 if mult_rse_match > 1: print('ERROR, multiple matches : %s at %s' % (surl, rse_id)) raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session))) hint = rse_id if hint not in dict_rse: dict_rse[hint] = [] dict_rse[hint].append(surl) if mult_rse_match == 0: if 'unknown' not in unknown_replicas: unknown_replicas['unknown'] = [] unknown_replicas['unknown'].append(surl) return scheme, dict_rse, unknown_replicas @read_session def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None): """ List RSE File replicas with no locks. :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this necromancer. :param total_threads: The total number of threads of all necromancers. :param session: The database session in use. :returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}. """ schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else '' if session.bind.dialect.name == 'oracle': # The filter(text...)) is needed otherwise, SQLA uses bind variables and the index is not used. query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\ filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot, schema_dot))). 
\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) else: query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ filter(models.RSEFileAssociation.state == ReplicaState.BAD) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot)) query = query.join(models.DataIdentifier, and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope, models.DataIdentifier.name == models.RSEFileAssociation.name)).\ filter(models.DataIdentifier.availability != DIDAvailability.LOST) query = query.limit(limit) rows = [] for scope, name, rse_id in query.yield_per(1000): rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)}) return rows @stream_session def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None): """ Get the DIDs associated to a PFN on one given RSE :param pfns: The list of PFNs. :param rse_id: The RSE id. :param vo: The VO to get DIDs from. :param session: The database session in use. :returns: A dictionary {pfn: {'scope': scope, 'name': name}} """ dict_rse = {} if not rse_id: scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session) if unknown_replicas: raise Exception else: scheme = 'srm' dict_rse[rse_id] = pfns for rse_id in dict_rse: pfns = dict_rse[rse_id] rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session) pfndict = {} proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme) if rse_info['deterministic']: parsed_pfn = proto.parse_pfns(pfns=pfns) # WARNING : this part is ATLAS specific and must be changed for pfn in parsed_pfn: path = parsed_pfn[pfn]['path'] if path.startswith('/user') or path.startswith('/group'): scope = '%s.%s' % (path.split('/')[1], path.split('/')[2]) name = parsed_pfn[pfn]['name'] elif path.startswith('/'): scope = path.split('/')[1] name = parsed_pfn[pfn]['name'] else: scope = path.split('/')[0] name = parsed_pfn[pfn]['name'] scope = InternalScope(scope, vo) yield {pfn: {'scope': scope, 'name': name}} else: condition = [] parsed_pfn = proto.parse_pfns(pfns=pfns) for pfn in parsed_pfn: path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name']) pfndict[path] = pfn condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id)) for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)): yield {pfndict[pfn]: {'scope': scope, 'name': name}} def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session): """ Resolve list of DIDs into a list of conditions. :param dids: The list of data identifiers (DIDs). :param unavailable: (deprecated) Also include unavailable replicas in the list. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param resolve_archives: When set to true, find archives which contain the replicas. :param session: The database session in use. """ did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], [] # Accumulate all the dids which were requested explicitly (not via a container/dataset). 
# If any replicas for these dids will be found latter, the associated did will be removed from the list, # leaving, at the end, only the requested dids which didn't have any replicas at all. files_wo_replica = [] for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]: if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member files_wo_replica.append({'scope': did['scope'], 'name': did['name']}) file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'], models.RSEFileAssociation.name == did['name'])) else: did_clause.append(and_(models.DataIdentifier.scope == did['scope'], models.DataIdentifier.name == did['name'])) if did_clause: for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type, models.DataIdentifier.constituent)\ .with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\ .filter(or_(*did_clause)): if resolve_archives and constituent: constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope, models.ConstituentAssociation.child_name == name)) if did_type == DIDType.FILE: files_wo_replica.append({'scope': scope, 'name': name}) file_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name)) elif did_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)) else: # Container content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_type) content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') child_dids = [(scope, name)] while child_dids: s, n = child_dids.pop() for tmp_did in content_query.filter_by(scope=s, name=n): if tmp_did.child_type == DIDType.DATASET: dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope, models.DataIdentifierAssociation.name == tmp_did.child_name)) else: child_dids.append((tmp_did.child_scope, tmp_did.child_name)) state_clause = None if not all_states: if not unavailable: state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) else: state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE, models.RSEFileAssociation.state == ReplicaState.COPYING) return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica def _pick_n_random(nrandom, generator): """ Select n random elements from the generator """ if not nrandom: # pass-through the data unchanged yield from generator return # A "reservoir sampling" algorithm: # Copy the N first files from the generator. After that, following element may be picked to substitute # one of the previously selected element with a probability which decreases as the number of encountered elements grows. 
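# Note: at most `nrandom` elements are held in memory at any time and the input generator is consumed lazily; the selected elements are yielded in reservoir order, which is not necessarily the original iteration order.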
selected = [] i = 0 iterator = iter(generator) try: for _ in range(nrandom): selected.append(next(iterator)) i += 1 while True: element = next(iterator) i += 1 index_to_substitute = random.randint(0, i) if index_to_substitute < nrandom: selected[index_to_substitute] = element except StopIteration: pass for r in selected: yield r def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of datasets. :param session: The database session in use. """ if not dataset_clause: return replica_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile).\ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle').\ outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\ filter(models.RSE.deleted == false()).\ filter(or_(*dataset_clause)).\ order_by(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name) if not ignore_availability: replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: replica_query = replica_query.filter(and_(state_clause)) if rse_clause is not None: replica_query = replica_query.filter(or_(*rse_clause)) if updated_after: replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500): yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for archive constituents. """ if not constituent_clause: return constituent_query = session.query(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name, models.ConstituentAssociation.scope, models.ConstituentAssociation.name, models.ConstituentAssociation.bytes, models.ConstituentAssociation.md5, models.ConstituentAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile). \ with_hint(models.RSEFileAssociation, text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", dialect_name='oracle'). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ outerjoin(models.RSEFileAssociation, and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope, models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \ join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \ filter(models.RSE.deleted == false()). \ filter(or_(*constituent_clause)). 
\ order_by(models.ConstituentAssociation.child_scope, models.ConstituentAssociation.child_name) if not ignore_availability: constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: constituent_query = constituent_query.filter(and_(state_clause)) if rse_clause is not None: constituent_query = constituent_query.filter(or_(*rse_clause)) if updated_after: constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after) for replica in constituent_query.yield_per(500): scope, name = replica[0], replica[1] {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield replica def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session): """ List file replicas for a list of files. :param session: The database session in use. """ if not file_clause: return for replica_condition in chunks(file_clause, 50): filters = [ models.RSEFileAssociation.rse_id == models.RSE.id, models.RSE.deleted == false(), or_(*replica_condition), ] if not ignore_availability: filters.append(models.RSE.availability.in_((4, 5, 6, 7))) if state_clause is not None: filters.append(state_clause) if rse_clause: filters.append(or_(*rse_clause)) if updated_after: filters.append(models.RSEFileAssociation.updated_at >= updated_after) replica_query = session.query( models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.bytes, models.RSEFileAssociation.md5, models.RSEFileAssociation.adler32, models.RSEFileAssociation.path, models.RSEFileAssociation.state, models.RSE.id, models.RSE.rse, models.RSE.rse_type, models.RSE.volatile, ) \ .filter(and_(*filters)) \ .order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \ .with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all(): {'scope': scope, 'name': name} in files_wo_replica and files_wo_replica.remove({'scope': scope, 'name': name}) yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile def _list_files_wo_replicas(files_wo_replica, session): if files_wo_replica: file_wo_clause = [] for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])): file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'])) files_wo_replicas_query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.md5, models.DataIdentifier.adler32).\ filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\ with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle') for scope, name, bytes, md5, adler32 in files_wo_replicas_query: yield scope, name, bytes, md5, adler32 def get_vp_endpoint(): """ VP endpoint is the Virtual Placement server. Once VP is integrated in Rucio it won't be needed. """ vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='') return vp_endpoint def get_multi_cache_prefix(cache_site, filename, logger=logging.log): """ for a givent cache site and filename, return address of the cache node that should be prefixed. 
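An empty string is returned when the VP endpoint is not configured, cannot be reached, the cache site is unknown, or no server range matches the filename hash.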
:param cache_site: Cache site :param filename: Filename """ vp_endpoint = get_vp_endpoint() if not vp_endpoint: return '' x_caches = REGION.get('CacheSites') if x_caches is NO_VALUE: try: response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False) if response.ok: x_caches = response.json() REGION.set('CacheSites', x_caches) else: REGION.set('CacheSites', {'could not reload': ''}) return '' except requests.exceptions.RequestException as re: REGION.set('CacheSites', {'could not reload': ''}) logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Excaption:{}'.format(vp_endpoint, re)) return '' if cache_site not in x_caches: return '' xcache_site = x_caches[cache_site] h = float( unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64 for irange in xcache_site['ranges']: if h < irange[1]: return xcache_site['servers'][irange[0]][0] return '' def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filters, ignore_availability, session): # iterator which merges multiple sorted replica sources into a combine sorted result without loading everything into the memory replicas = heapq.merge( _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session), key=lambda t: (t[0], t[1]), # sort by scope, name ) # we need to retain knowledge of the original domain selection by the user # in case we have to loop over replicas with a potential outgoing proxy original_domain = deepcopy(domain) # find all RSEs local to the client's location in autoselect mode (i.e., when domain is None) local_rses = [] if domain is None: if client_location and 'site' in client_location and client_location['site']: try: local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)] except Exception: pass # do not hard fail if site cannot be resolved or is empty file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {} for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas: pfns = [] # reset the domain selection to original user's choice (as this could get overwritten each iteration) domain = deepcopy(original_domain) if show_pfns and rse_id: if rse_id not in rse_info: rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session) # assign scheme priorities, and don't forget to exclude disabled protocols # 0 in RSE protocol definition = disabled, 1 = highest priority rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0} rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0} # select the lan door in autoselect mode, otherwise use the wan door if domain is None: domain = 'wan' if local_rses and rse_id in local_rses: domain = 'lan' if rse_id not in tmp_protocols: rse_schemes = schemes or [] if not rse_schemes: try: if domain == 'all': 
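# domain == 'all' selects both the lan and wan read protocol schemes for this RSE; a single requested domain selects only that one in the else branch below.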
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='wan')['scheme']) rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain='lan')['scheme']) else: rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id], operation='read', domain=domain)['scheme']) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) if archive_scope and archive_name and 'root' not in rse_schemes: rse_schemes.append('root') protocols = [] for s in rse_schemes: try: if domain == 'all': protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='lan'), rse_info[rse_id]['priority_lan'][s])) protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain='wan'), rse_info[rse_id]['priority_wan'][s])) else: protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id], operation='read', scheme=s, domain=domain), rse_info[rse_id]['priority_%s' % domain][s])) except exception.RSEProtocolNotSupported: pass # no need to be verbose except Exception: print(format_exc()) tmp_protocols[rse_id] = protocols # get pfns for tmp_protocol in tmp_protocols[rse_id]: # If the current "replica" is a constituent inside an archive, we must construct the pfn for the # parent (archive) file and append the xrdcl.unzip query string to it. if archive_scope and archive_name: t_scope = archive_scope t_name = archive_name else: t_scope = scope t_name = name protocol = tmp_protocol[1] if 'determinism_type' in protocol.attributes: # PFN is cachable try: path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] except KeyError: # No cache entry scope:name found for this protocol path = protocol._get_path(t_scope, t_name) pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path try: pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external, 'name': t_name, 'path': path}).values())[0] # do we need to sign the URLs? if sign_urls and protocol.attributes['scheme'] == 'https': service = get_rse_attribute('sign_url', rse_id=rse_id, session=session) if service and isinstance(service, list): pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime) # server side root proxy handling if location is set. # supports root and http destinations # cannot be pushed into protocols because we need to lookup rse attributes. # ultra-conservative implementation. if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location: if 'site' in client_location and client_location['site']: # is the RSE site-configured? rse_site_attr = get_rse_attribute('site', rse_id, session=session) replica_site = [''] if isinstance(rse_site_attr, list) and rse_site_attr: replica_site = rse_site_attr[0] # does it match with the client? 
if not, it's an outgoing connection # therefore the internal proxy must be prepended if client_location['site'] != replica_site: cache_site = config_get('clientcachemap', client_location['site'], default='', session=session) if cache_site != '': # print('client', client_location['site'], 'has cache:', cache_site) # print('filename', name) selected_prefix = get_multi_cache_prefix(cache_site, t_name) if selected_prefix: pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://') else: # print('site:', client_location['site'], 'has no cache') # print('lets check if it has defined an internal root proxy ') root_proxy_internal = config_get('root-proxy-internal', # section client_location['site'], # option default='', # empty string to circumvent exception session=session) if root_proxy_internal: # TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs. # For now -> skip prepending XCache for GCS. if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn: pass # ATLAS HACK else: # don't forget to mangle gfal-style davs URL into generic https URL pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://') # PFNs don't have concepts, therefore quickly encapsulate in a tuple # ('pfn', 'domain', 'priority', 'client_extract') t_domain = tmp_protocol[0] t_priority = tmp_protocol[2] t_client_extract = False if archive_scope and archive_name: t_domain = 'zip' pfn = add_url_query(pfn, {'xrdcl.unzip': name}) if protocol.attributes['scheme'] == 'root': # xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot. t_client_extract = False t_priority = -1 else: t_client_extract = True pfns.append((pfn, t_domain, t_priority, t_client_extract)) except Exception: # never end up here print(format_exc()) if protocol.attributes['scheme'] == 'srm': try: file['space_token'] = protocol.attributes['extended_attributes']['space_token'] except KeyError: file['space_token'] = None if 'scope' in file and 'name' in file: if file['scope'] == scope and file['name'] == name: # extract properly the pfn from the tuple file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns])) file['states'][rse_id] = str(state.name if state else state) if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} else: if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # quick exit, but don't forget to set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 file['rses'] = {} rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: 
file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} if not ('scope' in file and 'name' in file): file['scope'], file['name'] = scope, name file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32 file['pfns'], file['rses'] = {}, defaultdict(list) file['states'] = {rse_id: str(state.name if state else state)} if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)] if rse_id: # extract properly the pfn from the tuple file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns])) for tmp_pfn in pfns: file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id, 'rse': rse, 'type': str(rse_type.name), 'volatile': volatile, 'domain': tmp_pfn[1], 'priority': tmp_pfn[2], 'client_extract': tmp_pfn[3]} # set the total order for the priority # --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically # and use 1-indexing to be compatible with metalink if 'pfns' in file: tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']]) for i in range(0, len(tmp)): file['pfns'][tmp[i][2]]['priority'] = i + 1 if 'scope' in file and 'name' in file: file['rses'] = {} # don't forget to resolve parents for the last replica if resolve_parents: file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name']) for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)] # also sort the pfns inside the rse structure rse_pfns = [] for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]: rse_pfns.append((t_rse, t_priority, t_pfn)) rse_pfns = sorted(rse_pfns) for t_rse, t_priority, t_pfn in rse_pfns: if t_rse in file['rses']: file['rses'][t_rse].append(t_pfn) else: file['rses'][t_rse] = [t_pfn] yield file file = {} for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session): yield { 'scope': scope, 'name': name, 'bytes': bytes, 'md5': md5, 'adler32': adler32, 'pfns': {}, 'rses': defaultdict(list) } @stream_session def list_replicas(dids, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=False, pfns=True, rse_expression=None, client_location=None, domain=None, sign_urls=False, signature_lifetime=None, resolve_archives=True, resolve_parents=False, nrandom=None, updated_after=None, session=None): """ List file replicas for a list of data identifiers (DIDs). :param dids: The list of data identifiers (DIDs). :param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...) :param unavailable: (deprecated) Also include unavailable replicas in the list. :param request_id: ID associated with the request for debugging. :param ignore_availability: Ignore the RSE blocklisting. :param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary. :param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs. :param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'} :param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan'] :param sign_urls: If set, will sign the PFNs if necessary. :param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN. 
:param resolve_archives: When set to true, find archives which contain the replicas. :param resolve_parents: When set to true, find all parent datasets which contain the replicas. :param updated_after: datetime (UTC time), only return replicas updated after this time :param session: The database session in use. """ if dids: filter = {'vo': dids[0]['scope'].vo} else: filter = {'vo': 'def'} file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids( dids=dids, unavailable=unavailable, ignore_availability=ignore_availability, all_states=all_states, resolve_archives=resolve_archives, session=session ) rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) yield from _pick_n_random( nrandom, _list_replicas(dataset_clause, file_clause, state_clause, pfns, schemes, files_wo_replica, rse_clause, client_location, domain, sign_urls, signature_lifetime, constituent_clause, resolve_parents, updated_after, filter, ignore_availability, session) ) @transactional_session def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of new files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ for file in files: new_did = models.DataIdentifier(scope=file['scope'], name=file['name'], account=file.get('account') or account, did_type=DIDType.FILE, bytes=file['bytes'], md5=file.get('md5'), adler32=file.get('adler32'), is_new=None) new_did.save(session=session, flush=False) if 'meta' in file and file['meta']: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session) if dataset_meta: rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session) try: session.flush() except IntegrityError as error: if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \ or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \ or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \ or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \ or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \ or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except DatabaseError as error: if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]): raise exception.ScopeNotFound('Scope not found!') raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def __bulk_add_file_dids(files, account, dataset_meta=None, session=None): """ Bulk add new dids. :param dids: the list of files. 
:param account: The account owner. :param session: The database session in use. :returns: True is successful. """ condition = [] for f in files: condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE)) q = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.bytes, models.DataIdentifier.adler32, models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition)) available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q] new_files = list() for file in files: found = False for available_file in available_files: if file['scope'] == available_file['scope'] and file['name'] == available_file['name']: found = True break if not found: new_files.append(file) __bulk_add_new_file_dids(files=new_files, account=account, dataset_meta=dataset_meta, session=session) return new_files + available_files def tombstone_from_delay(tombstone_delay): # Tolerate None for tombstone_delay if not tombstone_delay: return None if not isinstance(tombstone_delay, timedelta): try: tombstone_delay = timedelta(seconds=int(tombstone_delay)) except ValueError: return None if not tombstone_delay: return None if tombstone_delay < timedelta(0): return datetime(1970, 1, 1) return datetime.utcnow() + tombstone_delay @transactional_session def __bulk_add_replicas(rse_id, files, account, session=None): """ Bulk add new dids. :param rse_id: the RSE id. :param dids: the list of files. :param account: The account owner. :param session: The database session in use. :returns: True is successful. """ nbfiles, bytes = 0, 0 # Check for the replicas already available condition = [] for f in files: condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id)) query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ filter(or_(*condition)) available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query] default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None) default_tombstone = tombstone_from_delay(default_tombstone_delay) new_replicas = [] for file in files: found = False for available_replica in available_replicas: if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']: found = True break if not found: nbfiles += 1 bytes += file['bytes'] new_replicas.append({'rse_id': rse_id, 'scope': file['scope'], 'name': file['name'], 'bytes': file['bytes'], 'path': file.get('path'), 'state': ReplicaState(file.get('state', 'A')), 'md5': file.get('md5'), 'adler32': file.get('adler32'), 'lock_cnt': file.get('lock_cnt', 0), 'tombstone': file.get('tombstone') or default_tombstone}) try: new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation, new_replicas) session.flush() return nbfiles, bytes except IntegrityError as error: if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \ or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \ or match('.*IntegrityError.*columns? 
rse_id.*scope.*name.*not unique.*', error.args[0]) \ or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]): raise exception.Duplicate("File replica already exists!") raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) @transactional_session def add_replicas(rse_id, files, account, ignore_availability=True, dataset_meta=None, session=None): """ Bulk add file replicas. :param rse_id: The RSE id. :param files: The list of files. :param account: The account owner. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. :returns: True is successful. """ def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None): p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr) expected_pfns = p.lfns2pfns(lfns) return clean_surls(expected_pfns.values()) replica_rse = get_rse(rse_id=rse_id, session=session) if replica_rse.volatile is True: raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse)) if not (replica_rse.availability & 2) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporary unavailable for writing' % replica_rse.rse) replicas = __bulk_add_file_dids(files=files, account=account, dataset_meta=dataset_meta, session=session) pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]} for file in files: if 'pfn' not in file: if not replica_rse.deterministic: raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse)) else: scheme = file['pfn'].split(':')[0] pfns.setdefault(scheme, []).append(file['pfn']) if pfns: rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session) for scheme in pfns.keys(): if not replica_rse.deterministic: p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme) pfns[scheme] = p.parse_pfns(pfns=pfns[scheme]) for file in files: if file['pfn'].startswith(scheme): tmp = pfns[scheme][file['pfn']] file['path'] = ''.join([tmp['path'], tmp['name']]) else: # Check that the pfns match to the expected pfns lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)] pfns[scheme] = clean_surls(pfns[scheme]) # Check wan first found_on_wan = False available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan') expected_pfns_wan = None for protocol_attr in available_wan_protocols: pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr) if not expected_pfns_wan and pfns_wan_buffer: expected_pfns_wan = pfns_wan_buffer found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme]) if found_on_wan: break if not found_on_wan: # Check lan found_on_lan = False available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan') for protocol_attr in available_lan_protocols: pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr) found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme]) if found_on_lan: break if found_on_lan == pfns[scheme]: # Registration always with wan pfns[scheme] = expected_pfns_wan else: raise exception.InvalidPath('One of the PFNs provided 
does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns))) nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session) increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session) return replicas @transactional_session def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None): """ Add File replica. :param rse_id: the rse id. :param scope: the scope name. :param name: The data identifier name. :param bytes: the size of the file. :param account: The account owner. :param md5: The md5 checksum. :param adler32: The adler32 checksum. :param pfn: Physical file name (for non-deterministic RSEs). :param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary. :param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ]. :param tombstone: If True, create replica with a tombstone. :param session: The database session in use. :returns: True if successful. """ if meta is None: meta = {} file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone} if pfn: file['pfn'] = pfn return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session) @transactional_session def delete_replicas(rse_id, files, ignore_availability=True, session=None): """ Delete file replicas. :param rse_id: the rse id. :param files: the list of files to delete. :param ignore_availability: Ignore the RSE blocklisting. :param session: The database session in use. """ replica_rse = get_rse(rse_id=rse_id, session=session) if not (replica_rse.availability & 1) and not ignore_availability: raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for deleting' % replica_rse.rse) replica_condition, src_condition = [], [] for file in files: replica_condition.append( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])) src_condition.append( and_(models.Source.scope == file['scope'], models.Source.name == file['name'], models.Source.rse_id == rse_id)) delta, bytes, rowcount = 0, 0, 0 # WARNING : This should not be necessary since that would mean the replica is used as a source. for chunk in chunks(src_condition, 10): rowcount = session.query(models.Source). \ filter(or_(*chunk)). \ delete(synchronize_session=False) rowcount = 0 for chunk in chunks(replica_condition, 10): for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \ with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)): bytes += replica_bytes delta += 1 rowcount += session.query(models.RSEFileAssociation). \ filter(models.RSEFileAssociation.rse_id == rse_id). \ filter(or_(*chunk)). 
\ delete(synchronize_session=False) if rowcount != len(files): raise exception.ReplicaNotFound("One or several replicas don't exist.") __cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session) # Decrease RSE counter decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session) @transactional_session def __cleanup_after_replica_deletion(rse_id, files, session=None): """ Perform update of collections/archive associations/dids after the removal of their replicas :param rse_id: the rse id :param files: list of files whose replica got deleted :param session: The database session in use. """ parent_condition, did_condition = [], [] clt_replica_condition, dst_replica_condition = [], [] incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], [] for file in files: # Schedule update of all collections containing this file and having a collection replica in the RSE dst_replica_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where( and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope, models.CollectionReplica.name == models.DataIdentifierAssociation.name, models.CollectionReplica.rse_id == rse_id)))) # If the file doesn't have any replicas anymore, we should perform cleanups of objects # related to this file. However, if the file is "lost", it's removal wasn't intentional, # so we want to skip deleting the metadata here. Perform cleanups: # 1) schedule removal of this file from all parent datasets parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == file['scope'], models.DataIdentifierAssociation.child_name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 2) schedule removal of this file from the DID table did_condition.append( and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability != DIDAvailability.LOST, ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])), ~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where( and_(models.ConstituentAssociation.child_scope == file['scope'], models.ConstituentAssociation.child_name == file['name'])))) # 3) if the file is an archive, schedule cleanup on the files from inside the archive archive_contents_condition.append( and_(models.ConstituentAssociation.scope == file['scope'], models.ConstituentAssociation.name == file['name'], ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( 
and_(models.DataIdentifier.scope == file['scope'], models.DataIdentifier.name == file['name'], models.DataIdentifier.availability == DIDAvailability.LOST)), ~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where( and_(models.RSEFileAssociation.scope == file['scope'], models.RSEFileAssociation.name == file['name'])))) # Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica if dst_replica_condition: for chunk in chunks(dst_replica_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\ filter(or_(*chunk)).\ distinct() for parent_scope, parent_name in query: models.UpdatedCollectionReplica(scope=parent_scope, name=parent_name, did_type=DIDType.DATASET, rse_id=rse_id).\ save(session=session, flush=False) # Delete did from the content for the last did while parent_condition: child_did_condition, tmp_parent_condition = [], [] for chunk in chunks(parent_condition, 10): query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\ filter(or_(*chunk)) for parent_scope, parent_name, did_type, child_scope, child_name in query: # Schedule removal of child file/dataset/container from the parent dataset/container child_did_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, models.DataIdentifierAssociation.child_scope == child_scope, models.DataIdentifierAssociation.child_name == child_name)) # Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore clt_is_not_archive_condition.append( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope, models.DataIdentifier.name == models.DataIdentifierAssociation.name, models.DataIdentifier.is_archive == true())), ~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope, models.DataIdentifier.name == models.DataIdentifierAssociation.child_name, models.DataIdentifier.is_archive == true())))) # If the parent dataset/container becomes empty as a result of the child removal # (it was the last children), metadata cleanup has to be done: # # 1) Schedule to remove the replicas of this empty collection clt_replica_condition.append( and_(models.CollectionReplica.scope == parent_scope, models.CollectionReplica.name == parent_name, exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False)), # NOQA ~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 2) Schedule removal of this empty collection from its own parent collections tmp_parent_condition.append( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name, 
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) # 3) Schedule removal of the entry from the DIDs table did_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.is_open == False, # NOQA ~exists([1]).where( and_(models.DataIdentifierAssociation.child_scope == parent_scope, models.DataIdentifierAssociation.child_name == parent_name)), ~exists([1]).where( and_(models.DataIdentifierAssociation.scope == parent_scope, models.DataIdentifierAssociation.name == parent_name)))) if child_did_condition: # get the list of modified parent scope, name for chunk in chunks(child_did_condition, 10): modifieds = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.did_type).\ distinct().\ with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(exists(select([1]). prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')). where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope, models.DataIdentifierAssociation.name == models.DataIdentifier.name, or_(models.DataIdentifier.complete == true(), models.DataIdentifier.complete is None)))) for parent_scope, parent_name, parent_did_type in modifieds: message = {'scope': parent_scope, 'name': parent_name, 'did_type': parent_did_type, 'event_type': 'INCOMPLETE'} if message not in messages: messages.append(message) incomplete_condition.append( and_(models.DataIdentifier.scope == parent_scope, models.DataIdentifier.name == parent_name, models.DataIdentifier.did_type == parent_did_type)) for chunk in chunks(child_did_condition, 10): rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session) session.query(models.DataIdentifierAssociation).\ filter(or_(*chunk)).\ delete(synchronize_session=False) parent_condition = tmp_parent_condition for chunk in chunks(clt_replica_condition, 10): session.query(models.CollectionReplica).\ filter(or_(*chunk)).\ delete(synchronize_session=False) # Update incomplete state for chunk in chunks(incomplete_condition, 10): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ filter(models.DataIdentifier.complete != false()).\ update({'complete': False}, synchronize_session=False) # delete empty dids messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], [] for chunk in chunks(did_condition, 100): query = session.query(models.DataIdentifier.scope, models.DataIdentifier.name, models.DataIdentifier.did_type).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)) for scope, name, did_type in query: if did_type == DIDType.DATASET: messages.append({'event_type': 'ERASE', 'payload': dumps({'scope': scope.external, 'name': name, 'account': 'root'})}) deleted_rules.append(and_(models.ReplicationRule.scope == scope, models.ReplicationRule.name == name)) deleted_dids.append(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name)) if session.bind.dialect.name == 'oracle': oracle_version = int(session.connection().connection.version.split('.')[0]) if oracle_version >= 12: deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) else: 
deleted_did_meta.append(and_(models.DidMeta.scope == scope, models.DidMeta.name == name)) # Remove Archive Constituents removed_constituents = [] constituents_to_delete_condition = [] for chunk in chunks(archive_contents_condition, 30): query = session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \ filter(or_(*chunk)) for constituent in query: removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name}) constituents_to_delete_condition.append( and_(models.ConstituentAssociation.scope == constituent.scope, models.ConstituentAssociation.name == constituent.name, models.ConstituentAssociation.child_scope == constituent.child_scope, models.ConstituentAssociation.child_name == constituent.child_name)) models.ConstituentAssociationHistory( child_scope=constituent.child_scope, child_name=constituent.child_name, scope=constituent.scope, name=constituent.name, bytes=constituent.bytes, adler32=constituent.adler32, md5=constituent.md5, guid=constituent.guid, length=constituent.length, updated_at=constituent.updated_at, created_at=constituent.created_at, ).save(session=session, flush=False) if len(constituents_to_delete_condition) > 200: session.query(models.ConstituentAssociation).\ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\ filter(or_(*constituents_to_delete_condition)).\ delete(synchronize_session=False) constituents_to_delete_condition.clear() __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) removed_constituents.clear() if constituents_to_delete_condition: session.query(models.ConstituentAssociation). \ with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \ filter(or_(*constituents_to_delete_condition)). 
\ delete(synchronize_session=False) __cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session) # Remove rules in Waiting for approval or Suspended for chunk in chunks(deleted_rules, 100): session.query(models.ReplicationRule).\ with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\ filter(or_(*chunk)).\ filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED, RuleState.WAITING_APPROVAL))).\ delete(synchronize_session=False) # Remove DID Metadata for chunk in chunks(deleted_did_meta, 100): session.query(models.DidMeta).\ filter(or_(*chunk)).\ delete(synchronize_session=False) for chunk in chunks(messages, 100): session.bulk_insert_mappings(models.Message, chunk) for chunk in chunks(deleted_dids, 100): session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(*chunk)).\ delete(synchronize_session=False) if session.bind.dialect.name != 'oracle': rucio.core.did.insert_deleted_dids(chunk, session=session) # Set is_archive = false on collections which don't have archive children anymore for chunk in chunks(clt_is_not_archive_condition, 100): clt_to_update = list(session .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .distinct(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name) .with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle') .filter(or_(*chunk))) if clt_to_update: session.query(models.DataIdentifier).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ filter(or_(and_(models.DataIdentifier.scope == scope, models.DataIdentifier.name == name, models.DataIdentifier.is_archive == true()) for scope, name in clt_to_update)).\ update({'is_archive': False}, synchronize_session=False) @transactional_session def get_replica(rse_id, scope, name, session=None): """ Get File replica. :param rse_id: The RSE Id. :param scope: the scope name. :param name: The data identifier name. :param session: The database session in use. :returns: A dictionary with the list of replica attributes. """ try: row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one() result = {} for column in row.__table__.columns: result[column.name] = getattr(row, column.name) return result except NoResultFound: raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @transactional_session def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None): """ List RSE File replicas with no locks. :param limit: Number of replicas returned. :param bytes: The amount of needed bytes. :param rse_id: The rse_id. :param delay_seconds: The delay to query replicas in BEING_DELETED state :param only_delete_obsolete If set to True, will only return the replicas with EPOCH tombstone :param session: The database session in use. :returns: a list of dictionary replica. """ none_value = None # Hack to get pep8 happy... 
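    # Candidate selection for deletion: only replicas on this RSE whose tombstone has already
    # expired and which hold no locks (lock_cnt == 0) are considered. Eligible states are
    # AVAILABLE, UNAVAILABLE and BAD, plus replicas stuck in BEING_DELETED for more than
    # delay_seconds. Replicas still referenced in the sources table are skipped, the selected
    # rows are locked with FOR UPDATE SKIP LOCKED so concurrent workers do not pick the same
    # candidates, and results are ordered by tombstone (oldest first).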
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path, models.RSEFileAssociation.bytes, models.RSEFileAssociation.tombstone, models.RSEFileAssociation.state).\ with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\ filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\ filter(models.RSEFileAssociation.lock_cnt == 0).\ filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\ filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)), and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\ filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle') .where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\ with_for_update(skip_locked=True).\ order_by(models.RSEFileAssociation.tombstone) needed_space = bytes total_bytes, total_files = 0, 0 rows = [] replica_clause = [] for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000): # Check if more than one replica is available replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one() if replica_cnt[0] > 1: if state != ReplicaState.UNAVAILABLE: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) else: # If this is the last replica, check if there are some requests request_cnt = session.query(func.count()).\ with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\ filter(and_(models.Request.scope == scope, models.Request.name == name)).one() if request_cnt[0] == 0: if tombstone != OBSOLETE: if only_delete_obsolete: break if needed_space is not None and total_bytes > needed_space: break total_bytes += bytes total_files += 1 if total_files > limit: break rows.append({'scope': scope, 'name': name, 'path': path, 'bytes': bytes, 'tombstone': tombstone, 'state': state}) replica_clause.append(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id == rse_id)) for chunk in chunks(replica_clause, 100): session.query(models.RSEFileAssociation).filter(or_(*chunk)).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\ update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False) return rows @transactional_session def update_replicas_states(replicas, nowait=False, session=None): """ Update File replica information and state. 
:param replicas: The list of replicas. :param nowait: Nowait parameter for the for_update queries. :param session: The database session in use. """ for replica in replicas: query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']) try: if nowait: query.with_for_update(nowait=True).one() except NoResultFound: # remember scope, name and rse raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session))) if isinstance(replica['state'], string_types): replica['state'] = ReplicaState(replica['state']) values = {'state': replica['state']} if replica['state'] == ReplicaState.BEING_DELETED: query = query.filter_by(lock_cnt=0) # Exclude replicas use as sources stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope, models.RSEFileAssociation.name == models.Source.name, models.RSEFileAssociation.rse_id == models.Source.rse_id)) query = query.filter(not_(stmt)) values['tombstone'] = OBSOLETE elif replica['state'] == ReplicaState.AVAILABLE: rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session) elif replica['state'] == ReplicaState.UNAVAILABLE: rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], error_message=replica.get('error_message', None), broken_rule_id=replica.get('broken_rule_id', None), broken_message=replica.get('broken_message', None), nowait=nowait, session=session) elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE: query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE)) if 'path' in replica and replica['path']: values['path'] = replica['path'] if not query.update(values, synchronize_session=False): if 'rse' not in replica: replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session) raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica) return True @transactional_session def touch_replica(replica, session=None): """ Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked. :param replica: a dictionary with the information of the affected replica. :param session: The database session in use. :returns: True, if successful, False otherwise. 
""" try: accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None session.query(models.RSEFileAssociation).\ filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\ with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\ update({'accessed_at': accessed_at, 'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.tombstone != OBSOLETE), accessed_at)], else_=models.RSEFileAssociation.tombstone)}, synchronize_session=False) session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ with_for_update(nowait=True).one() session.query(models.DataIdentifier).\ filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\ with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\ update({'accessed_at': accessed_at}, synchronize_session=False) except DatabaseError: return False except NoResultFound: return True return True @transactional_session def update_replica_state(rse_id, scope, name, state, session=None): """ Update File replica information and state. :param rse_id: the rse id. :param scope: the tag name. :param name: The data identifier name. :param state: The state. :param session: The database session in use. """ return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session) @transactional_session def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None): """ Get file replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param session: The db session in use. :returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return query.with_for_update(nowait=nowait).all() @transactional_session def get_source_replicas(scope, name, source_rses=None, session=None): """ Get soruce replicas for a specific scope:name. :param scope: The scope of the did. :param name: The name of the did. :param soruce_rses: Possible RSE_ids to filter on. :param session: The db session in use. 
:returns: List of SQLAlchemy Replica Objects """ query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = query.filter(or_(*rse_clause)) return [a[0] for a in query.all()] @transactional_session def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param nowait: Nowait parameter for the FOR UPDATE statement :param restrict_rses: Possible RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. :returns: (files in dataset, replicas in dataset) """ files, replicas = {}, {} if session.bind.dialect.name == 'postgresql': # Get content content_query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32).\ with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle').\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000): files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} replicas[(child_scope, child_name)] = [] # Get replicas and lock them query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != 
ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) else: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle') \ .with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if restrict_rses is not None: if len(restrict_rses) < 10: rse_clause = [] for rse_id in restrict_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.bytes, models.DataIdentifierAssociation.md5, models.DataIdentifierAssociation.adler32, models.RSEFileAssociation)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt) for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000): if (child_scope, child_name) not in files: files[(child_scope, child_name)] = {'scope': child_scope, 'name': child_name, 'bytes': bytes, 'md5': md5, 'adler32': adler32} if (child_scope, child_name) in replicas: if replica is not None: replicas[(child_scope, child_name)].append(replica) else: replicas[(child_scope, child_name)] = [] if replica is not None: replicas[(child_scope, child_name)].append(replica) return (list(files.values()), replicas) @transactional_session def get_source_replicas_for_dataset(scope, name, source_rses=None, total_threads=None, thread_id=None, session=None): """ Get file replicas for all files of a dataset. :param scope: The scope of the dataset. :param name: The name of the dataset. :param source_rses: Possible source RSE_ids to filter on. :param total_threads: Total threads :param thread_id: This thread :param session: The db session in use. 
:returns: (files in dataset, replicas in dataset) """ query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\ filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if source_rses: if len(source_rses) < 10: rse_clause = [] for rse_id in source_rses: rse_clause.append(models.RSEFileAssociation.rse_id == rse_id) if rse_clause: query = session.query(models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name, models.RSEFileAssociation.rse_id)\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .outerjoin(models.RSEFileAssociation, and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.RSEFileAssociation.state == ReplicaState.AVAILABLE, or_(*rse_clause)))\ .filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name) if total_threads and total_threads > 1: query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread_id, hash_variable='child_name') replicas = {} for child_scope, child_name, rse_id in query: if (child_scope, child_name) in replicas: if rse_id: replicas[(child_scope, child_name)].append(rse_id) else: replicas[(child_scope, child_name)] = [] if rse_id: replicas[(child_scope, child_name)].append(rse_id) return replicas @read_session def get_replica_atime(replica, session=None): """ Get the accessed_at timestamp for a replica. Just for testing. :param replicas: List of dictionaries {scope, name, rse_id, path} :param session: Database session to use. :returns: A datetime timestamp with the last access time. """ return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\ with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0] @transactional_session def touch_collection_replicas(collection_replicas, session=None): """ Update the accessed_at timestamp of the given collection replicas. :param collection_replicas: the list of collection replicas. :param session: The database session in use. :returns: True, if successful, False otherwise. """ now = datetime.utcnow() for collection_replica in collection_replicas: try: session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\ update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False) except DatabaseError: return False return True @stream_session def list_dataset_replicas(scope, name, deep=False, session=None): """ :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ if not deep: query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() else: # find maximum values content_query = session\ .query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"), func.count().label("length"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name) bytes, length = 0, 0 for row in content_query: bytes, length = row.bytes, row.length # find archives that contain files of the requested dataset sub_query_archives = session\ .query(models.DataIdentifierAssociation.scope.label('dataset_scope'), models.DataIdentifierAssociation.name.label('dataset_name'), models.DataIdentifierAssociation.bytes.label('file_bytes'), models.ConstituentAssociation.child_scope.label('file_scope'), models.ConstituentAssociation.child_name.label('file_name'), models.RSEFileAssociation.scope.label('replica_scope'), models.RSEFileAssociation.name.label('replica_name'), models.RSE.rse, models.RSE.id.label('rse_id'), models.RSEFileAssociation.created_at, models.RSEFileAssociation.accessed_at, models.RSEFileAssociation.updated_at)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\ .filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\ .filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\ .filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\ .filter(models.RSEFileAssociation.rse_id == models.RSE.id)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .filter(models.RSE.deleted == false())\ .subquery() # count the metrics group_query_archives = session\ .query(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse, func.sum(sub_query_archives.c.file_bytes).label('file_bytes'), func.min(sub_query_archives.c.created_at).label('created_at'), func.max(sub_query_archives.c.updated_at).label('updated_at'), func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(sub_query_archives.c.dataset_scope, sub_query_archives.c.dataset_name, sub_query_archives.c.file_scope, sub_query_archives.c.file_name, sub_query_archives.c.rse_id, sub_query_archives.c.rse)\ .subquery() # bring it in the same column state as the non-archive query full_query_archives = session\ .query(group_query_archives.c.dataset_scope.label('scope'), group_query_archives.c.dataset_name.label('name'), group_query_archives.c.rse_id, 
group_query_archives.c.rse, func.sum(group_query_archives.c.file_bytes).label('available_bytes'), func.count().label('available_length'), func.min(group_query_archives.c.created_at).label('created_at'), func.max(group_query_archives.c.updated_at).label('updated_at'), func.max(group_query_archives.c.accessed_at).label('accessed_at'))\ .group_by(group_query_archives.c.dataset_scope, group_query_archives.c.dataset_name, group_query_archives.c.rse_id, group_query_archives.c.rse) # find the non-archive dataset replicas sub_query = session\ .query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id, func.sum(models.RSEFileAssociation.bytes).label("available_bytes"), func.count().label("available_length"), func.min(models.RSEFileAssociation.created_at).label("created_at"), func.max(models.RSEFileAssociation.updated_at).label("updated_at"), func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\ .with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\ .filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\ .filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\ .filter(models.DataIdentifierAssociation.scope == scope)\ .filter(models.DataIdentifierAssociation.name == name)\ .filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\ .group_by(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name, models.RSEFileAssociation.rse_id)\ .subquery() query = session\ .query(sub_query.c.scope, sub_query.c.name, sub_query.c.rse_id, models.RSE.rse, sub_query.c.available_bytes, sub_query.c.available_length, sub_query.c.created_at, sub_query.c.updated_at, sub_query.c.accessed_at)\ .filter(models.RSE.id == sub_query.c.rse_id)\ .filter(models.RSE.deleted == false()) # join everything together final_query = query.union_all(full_query_archives) for row in final_query.all(): replica = row._asdict() replica['length'], replica['bytes'] = length, bytes if replica['length'] == row.available_length: replica['state'] = ReplicaState.AVAILABLE else: replica['state'] = ReplicaState.UNAVAILABLE yield replica @stream_session def list_dataset_replicas_bulk(names_by_intscope, session=None): """ :param names_by_intscope: The dictionary of internal scopes pointing at the list of names. :param session: Database session to use. 
:returns: A list of dictionaries containing the dataset replicas with associated metrics and timestamps """ condition = [] for scope in names_by_intscope: condition.append(and_(models.CollectionReplica.scope == scope, models.CollectionReplica.name.in_(names_by_intscope[scope]))) try: # chunk size refers to the number of different scopes, see above for chunk in chunks(condition, 10): query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.rse, models.CollectionReplica.rse_id, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at) \ .filter(models.CollectionReplica.did_type == DIDType.DATASET) \ .filter(models.CollectionReplica.rse_id == models.RSE.id) \ .filter(or_(*chunk)) \ .filter(models.RSE.deleted == false()) for row in query: yield row._asdict() except NoResultFound: raise exception.DataIdentifierNotFound('No Data Identifiers found') @stream_session def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log): """ List dataset replicas for a DID (scope:name) using the Virtual Placement service. NOTICE: This is an RnD function and might change or go away at any time. :param scope: The scope of the dataset. :param name: The name of the dataset. :param deep: Lookup at the file level. :param session: Database session to use. :returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites """ vp_endpoint = get_vp_endpoint() vp_replies = ['other'] nr_replies = 5 # force limit reply size if not vp_endpoint: return vp_replies try: vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name), verify=False, timeout=1) if vp_replies.status_code == 200: vp_replies = vp_replies.json() else: vp_replies = ['other'] except requests.exceptions.RequestException as re: logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re)) vp_replies = ['other'] if vp_replies != ['other']: # check that there is at least one regular replica # that is not on tape and has a protocol with scheme "root" # and can be accessed from WAN accessible_replica_exists = False for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session): rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session) if rse_info['rse_type'] == 'TAPE': continue for prot in rse_info['protocols']: if prot['scheme'] == 'root' and prot['domains']['wan']['read']: accessible_replica_exists = True break if accessible_replica_exists is True: break if accessible_replica_exists is True: for vp_reply in vp_replies: yield {'vp': True, 'site': vp_reply} @stream_session def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None): """ List datasets at a RSE. :param rse: the rse id. :param filters: dictionary of attributes by which the results should be filtered. :param limit: limit number. :param session: Database session to use. 
:returns: A list of dict dataset replicas """ query = session.query(models.CollectionReplica.scope, models.CollectionReplica.name, models.RSE.id.label('rse_id'), models.RSE.rse, models.CollectionReplica.bytes, models.CollectionReplica.length, models.CollectionReplica.available_bytes, models.CollectionReplica.available_replicas_cnt.label("available_length"), models.CollectionReplica.state, models.CollectionReplica.created_at, models.CollectionReplica.updated_at, models.CollectionReplica.accessed_at)\ .filter_by(did_type=DIDType.DATASET)\ .filter(models.CollectionReplica.rse_id == models.RSE.id)\ .filter(models.RSE.id == rse_id)\ .filter(models.RSE.deleted == false()) for (k, v) in filters and filters.items() or []: if k == 'name' or k == 'scope': v_str = v if k != 'scope' else v.internal if '*' in v_str or '%' in v_str: if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'))) else: query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\')) else: query = query.filter(getattr(models.CollectionReplica, k) == v) # hints ? elif k == 'created_before': created_before = str_to_date(v) query = query.filter(models.CollectionReplica.created_at <= created_before) elif k == 'created_after': created_after = str_to_date(v) query = query.filter(models.CollectionReplica.created_at >= created_after) else: query = query.filter(getattr(models.CollectionReplica, k) == v) if limit: query = query.limit(limit) for row in query: yield row._asdict() @transactional_session def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None): """ Get update request for collection replicas. :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: Maximum numberws to return. :param session: Database session in use. :returns: List of update requests for collection replicas. """ # Delete update requests which do not have collection_replicas session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False) session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None) & ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503 models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope, models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False) # Delete duplicates if session.bind.dialect.name == 'oracle': schema = '' if BASE.metadata.schema: schema = BASE.metadata.schema + '.' 
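        # Oracle path: drop duplicate rows from updated_col_rep via ROWID, keeping a single row
        # per (scope, name, did_type, rse_id) group, with NULL rse_ids treated as equal.
        # The MySQL branch and the generic fallback below deduplicate the same update requests
        # with subqueries or a Python-side scan, respectively.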
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema)) elif session.bind.dialect.name == 'mysql': subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\ group_by(models.UpdatedCollectionReplica.scope, models.UpdatedCollectionReplica.name, models.UpdatedCollectionReplica.rse_id).subquery() subquery2 = session.query(subquery1.c.max_id).subquery() session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False) else: replica_update_requests = session.query(models.UpdatedCollectionReplica) update_requests_with_rse_id = [] update_requests_without_rse_id = [] duplicate_request_ids = [] for update_request in replica_update_requests.all(): if update_request.rse_id is not None: small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id} if small_request not in update_requests_with_rse_id: update_requests_with_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue else: small_request = {'name': update_request.name, 'scope': update_request.scope} if small_request not in update_requests_without_rse_id: update_requests_without_rse_id.append(small_request) else: duplicate_request_ids.append(update_request.id) continue for chunk in chunks(duplicate_request_ids, 100): session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False) query = session.query(models.UpdatedCollectionReplica) if limit: query = query.limit(limit) return [update_request.to_dict() for update_request in query.all()] @transactional_session def update_collection_replica(update_request, session=None): """ Update a collection replica. :param update_request: update request from the upated_col_rep table. 
""" if update_request['rse_id'] is not None: # Check one specific dataset replica ds_length = 0 old_available_replicas = 0 ds_bytes = 0 ds_replica_state = None ds_available_bytes = 0 available_replicas = 0 try: collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() ds_length = collection_replica.length old_available_replicas = collection_replica.available_replicas_cnt ds_bytes = collection_replica.bytes except NoResultFound: pass try: file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.rse_id == update_request['rse_id'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .one() available_replicas = file_replica.available_replicas ds_available_bytes = file_replica.ds_available_bytes except NoResultFound: pass if available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE if old_available_replicas > 0 and available_replicas == 0: session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .delete() else: updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=update_request['rse_id'])\ .one() updated_replica.state = ds_replica_state updated_replica.available_replicas_cnt = available_replicas updated_replica.length = ds_length updated_replica.bytes = ds_bytes updated_replica.available_bytes = ds_available_bytes else: # Check all dataset replicas association = session.query(models.DataIdentifierAssociation)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .with_entities(label('ds_length', func.count()), label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\ .one() ds_length = association.ds_length ds_bytes = association.ds_bytes ds_replica_state = None collection_replicas = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'])\ .all() for collection_replica in collection_replicas: if ds_length: collection_replica.length = ds_length else: collection_replica.length = 0 if ds_bytes: collection_replica.bytes = ds_bytes else: collection_replica.bytes = 0 file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\ .filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope, models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.name == update_request['name'], models.RSEFileAssociation.state == ReplicaState.AVAILABLE, update_request['scope'] == models.DataIdentifierAssociation.scope)\ .with_entities(models.RSEFileAssociation.rse_id, label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)), label('available_replicas', func.count()))\ .group_by(models.RSEFileAssociation.rse_id)\ .all() for file_replica in file_replicas: if 
file_replica.available_replicas >= ds_length: ds_replica_state = ReplicaState.AVAILABLE else: ds_replica_state = ReplicaState.UNAVAILABLE collection_replica = session.query(models.CollectionReplica)\ .filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\ .first() if collection_replica: collection_replica.state = ds_replica_state collection_replica.available_replicas_cnt = file_replica.available_replicas collection_replica.available_bytes = file_replica.ds_available_bytes session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete() @read_session def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None): """ Returns a list of bad PFNs :param limit: The maximum number of replicas returned. :param thread: The assigned thread for this minos instance. :param total_threads: The total number of minos threads. :param session: The database session in use. returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at} """ result = [] query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path') query.order_by(models.BadPFNs.created_at) query = query.limit(limit) for path, state, reason, account, expires_at in query.yield_per(1000): result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}) return result @transactional_session def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None): """ Bulk add new bad replicas. :param replicas: the list of bad replicas. :param account: The account who declared the bad replicas. :param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE). :param session: The database session in use. :returns: True is successful. """ for replica in replicas: insert_new_row = True if state == BadFilesStatus.TEMPORARY_UNAVAILABLE: query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state) if query.count(): query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False) insert_new_row = False if insert_new_row: new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason, state=state, account=account, bytes=None, expires_at=expires_at) new_bad_replica.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!') raise exception.RucioException(error.args) return True @transactional_session def bulk_delete_bad_pfns(pfns, session=None): """ Bulk delete bad PFNs. :param pfns: the list of new files. :param session: The database session in use. :returns: True is successful. 
""" pfn_clause = [] for pfn in pfns: pfn_clause.append(models.BadPFNs.path == pfn) for chunk in chunks(pfn_clause, 100): query = session.query(models.BadPFNs).filter(or_(*chunk)) query.delete(synchronize_session=False) return True @transactional_session def bulk_delete_bad_replicas(bad_replicas, session=None): """ Bulk delete bad replica. :param bad_replicas: The list of bad replicas to delete (Dictionaries). :param session: The database session in use. :returns: True is successful. """ replica_clause = [] for replica in bad_replicas: replica_clause.append(and_(models.BadReplicas.scope == replica['scope'], models.BadReplicas.name == replica['name'], models.BadReplicas.rse_id == replica['rse_id'], models.BadReplicas.state == replica['state'])) for chunk in chunks(replica_clause, 100): session.query(models.BadReplicas).filter(or_(*chunk)).\ delete(synchronize_session=False) return True @transactional_session def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None): """ Add bad PFNs. :param pfns: the list of new files. :param account: The account who declared the bad replicas. :param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE. :param reason: A string describing the reason of the loss. :param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files. :param session: The database session in use. :returns: True is successful. """ if isinstance(state, string_types): rep_state = BadPFNStatus[state] else: rep_state = state pfns = clean_surls(pfns) for pfn in pfns: new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at) new_pfn = session.merge(new_pfn) new_pfn.save(session=session, flush=False) try: session.flush() except IntegrityError as error: raise exception.RucioException(error.args) except DatabaseError as error: raise exception.RucioException(error.args) except FlushError as error: if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]): raise exception.Duplicate('One PFN already exists!') raise exception.RucioException(error.args) return True @read_session def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None): """ List the expired temporary unavailable replicas :param total_workers: Number of total workers. :param worker_number: id of the executing worker. :param limit: The maximum number of replicas returned. :param session: The database session in use. """ query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\ filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\ filter(models.BadReplicas.expires_at < datetime.utcnow()).\ with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\ order_by(models.BadReplicas.expires_at) query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name') query = query.limit(limit) return query.all() @read_session def get_replicas_state(scope=None, name=None, session=None): """ Method used by the necromancer to get all the replicas of a DIDs :param scope: The scope of the file. :param name: The name of the file. :param session: The database session in use. 
:returns: A dictionary with the list of states as keys and the rse_ids as value """ query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name) states = {} for res in query.all(): rse_id, state = res if state not in states: states[state] = [] states[state].append(rse_id) return states @read_session def get_suspicious_files(rse_expression, filter=None, **kwargs): """ Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date, present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list. Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or be declared as <is_suspicious> in the bad_replicas table. Keyword Arguments: :param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago. :param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0. :param rse_expression: The RSE expression where the replicas are located. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param: exclude_states: List of states which eliminates replicas from search result if any of the states in the list was declared for a replica since younger_than date. Allowed values = ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS'). :param: available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE than the one in the bad_replicas table will be taken into account. Default value = False. :param: is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False. :param session: The database session in use. Default value = None. :returns: a list of replicas: [{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...] 
""" younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10)) nattempts = kwargs.get("nattempts", 0) session = kwargs.get("session", None) exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D']) available_elsewhere = kwargs.get("available_elsewhere", False) is_suspicious = kwargs.get("is_suspicious", False) # only for the 2 web api used parameters, checking value types and assigning the default values if not isinstance(nattempts, int): nattempts = 0 if not isinstance(younger_than, datetime): younger_than = datetime.now() - timedelta(days=10) # assembling exclude_states_clause exclude_states_clause = [] for state in exclude_states: exclude_states_clause.append(BadFilesStatus(state)) # making aliases for bad_replicas and replicas tables bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias') replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias') # assembling the selection rse_clause rse_clause = [] if rse_expression: parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session) for rse in parsedexp: rse_clause.append(models.RSEFileAssociation.rse_id == rse['id']) # query base query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\ .filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id, models.RSEFileAssociation.scope == bad_replicas_alias.scope, models.RSEFileAssociation.name == bad_replicas_alias.name, bad_replicas_alias.created_at >= younger_than) if is_suspicious: query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS) if rse_clause: query = query.filter(or_(*rse_clause)) if available_elsewhere: available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE, replicas_alias.scope == bad_replicas_alias.scope, replicas_alias.name == bad_replicas_alias.name, replicas_alias.rse_id != bad_replicas_alias.rse_id))) query = query.filter(available_replica) # it is required that the selected replicas # do not occur as BAD/DELETED/LOST/RECOVERED/... # in the bad_replicas table during the same time window. other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope, models.BadReplicas.name == bad_replicas_alias.name, models.BadReplicas.created_at >= younger_than, models.BadReplicas.rse_id == bad_replicas_alias.rse_id, models.BadReplicas.state.in_(exclude_states_clause)))) query = query.filter(not_(other_states_present)) # finally, the results are grouped by RSE, scope, name and required to have # at least 'nattempts' occurrences in the result of the query per replica query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all() # print(query) # translating the rse_id to RSE name and assembling the return list of dictionaries result = [] rses = {} for cnt, scope, name, rse_id, created_at in query_result: if rse_id not in rses: rse = get_rse_name(rse_id=rse_id, session=session) rses[rse_id] = rse result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at}) return result @transactional_session def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None): """ Sets a tombstone on a replica. :param rse_id: ID of RSE. :param scope: scope of the replica DID. :param name: name of the replica DID. :param tombstone: the tombstone to set. 
Default is OBSOLETE :param session: database session in use. """ rowcount = session.query(models.RSEFileAssociation).filter( and_( models.RSEFileAssociation.rse_id == rse_id, models.RSEFileAssociation.name == name, models.RSEFileAssociation.scope == scope, ~exists().where( and_( models.ReplicaLock.rse_id == rse_id, models.ReplicaLock.name == name, models.ReplicaLock.scope == scope, ) ) ) ) \ .with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \ .update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False) if rowcount == 0: try: session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one() raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) except NoResultFound: raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session))) @read_session def get_RSEcoverage_of_dataset(scope, name, session=None): """ Get total bytes present on RSEs :param scope: Scope of the dataset :param name: Name of the dataset :param session: The db session. :return: Dictionary { rse_id : <total bytes present at rse_id> } """ query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes)) query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope, models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name, models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name, models.RSEFileAssociation.state != ReplicaState.BEING_DELETED, )) query = query.group_by(models.RSEFileAssociation.rse_id) result = {} for rse_id, total in query: if total: result[rse_id] = total return result
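The record above closes with bulk helpers (bulk_delete_bad_pfns, bulk_delete_bad_replicas) that split a long list of filter clauses into batches of 100 and issue one or_()-combined delete per batch, keeping each SQL statement bounded. The sketch below illustrates only that batching pattern; the chunks generator shown here is an illustrative stand-in for the helper Rucio imports, and the clause strings are hypothetical placeholders.

# Minimal sketch of the batching pattern used by the bulk-delete helpers above.
# NOTE: this `chunks` is an illustrative stand-in; the real helper Rucio imports
# may be implemented differently.
def chunks(items, size):
    """Yield successive slices of `items` with at most `size` elements each."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

# Hypothetical stand-ins for clause objects like models.BadPFNs.path == pfn.
pfn_clause = ["path == 'pfn_%d'" % i for i in range(250)]

# In the record, each batch becomes query.filter(or_(*chunk)).delete(synchronize_session=False).
print([len(batch) for batch in chunks(pfn_clause, 100)])   # [100, 100, 50]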
inverse_deriv2
Second derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)''(z) : ndarray The value of the second derivative of the inverse of the link function Notes ----- This method should be overwritten by subclasses. The inherited method is implemented through numerical differentiation.
''' Defines the link functions to be used with GLM and GEE families. ''' import numpy as np import scipy.stats FLOAT_EPS = np.finfo(float).eps class Link(object): """ A generic link function for one-parameter exponential family. `Link` does nothing, but lays out the methods expected of any subclass. """ def __call__(self, p): """ Return the value of the link function. This is just a placeholder. Parameters ---------- p : array_like Probabilities Returns ------- g(p) : array_like The value of the link function g(p) = z """ return NotImplementedError def inverse(self, z): """ Inverse of the link function. Just a placeholder. Parameters ---------- z : array_like `z` is usually the linear predictor of the transformed variable in the IRLS algorithm for GLM. Returns ------- g^(-1)(z) : ndarray The value of the inverse of the link function g^(-1)(z) = p """ return NotImplementedError def deriv(self, p): """ Derivative of the link function g'(p). Just a placeholder. Parameters ---------- p : array_like Returns ------- g'(p) : ndarray The value of the derivative of the link function g'(p) """ return NotImplementedError def deriv2(self, p): """Second derivative of the link function g''(p) implemented through numerical differentiation """ from statsmodels.tools.numdiff import _approx_fprime_cs_scalar return _approx_fprime_cs_scalar(p, self.deriv) def inverse_deriv(self, z): """ Derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : ndarray The value of the derivative of the inverse of the link function Notes ----- This reference implementation gives the correct result but is inefficient, so it can be overridden in subclasses. """ return 1 / self.deriv(self.inverse(z)) def inverse_deriv2(self, z): """ Second derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : ndarray The value of the second derivative of the inverse of the link function Notes ----- This reference implementation gives the correct result but is inefficient, so it can be overridden in subclasses. """ iz = self.inverse(z) return -self.deriv2(iz) / self.deriv(iz)**3 class Logit(Link): """ The logit transform Notes ----- call and derivative use a private method _clean to make trim p by machine epsilon so that p is in (0,1) Alias of Logit: logit = Logit() """ def _clean(self, p): """ Clip logistic values to range (eps, 1-eps) Parameters ---------- p : array_like Probabilities Returns ------- pclip : ndarray Clipped probabilities """ return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS) def __call__(self, p): """ The logit transform Parameters ---------- p : array_like Probabilities Returns ------- z : ndarray Logit transform of `p` Notes ----- g(p) = log(p / (1 - p)) """ p = self._clean(p) return np.log(p / (1. - p)) def inverse(self, z): """ Inverse of the logit transform Parameters ---------- z : array_like The value of the logit transform at `p` Returns ------- p : ndarray Probabilities Notes ----- g^(-1)(z) = exp(z)/(1+exp(z)) """ z = np.asarray(z) t = np.exp(-z) return 1. / (1. + t) def deriv(self, p): """ Derivative of the logit transform Parameters ---------- p : array_like Probabilities Returns ------- g'(p) : ndarray Value of the derivative of logit transform at `p` Notes ----- g'(p) = 1 / (p * (1 - p)) Alias for `Logit`: logit = Logit() """ p = self._clean(p) return 1. 
/ (p * (1 - p)) def inverse_deriv(self, z): """ Derivative of the inverse of the logit transform Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : ndarray The value of the derivative of the inverse of the logit function """ t = np.exp(z) return t/(1 + t)**2 def deriv2(self, p): """ Second derivative of the logit function. Parameters ---------- p : array_like probabilities Returns ------- g''(z) : ndarray The value of the second derivative of the logit function """ v = p * (1 - p) return (2*p - 1) / v**2 class logit(Logit): pass class Power(Link): """ The power transform Parameters ---------- power : float The exponent of the power transform Notes ----- Aliases of Power: inverse = Power(power=-1) sqrt = Power(power=.5) inverse_squared = Power(power=-2.) identity = Power(power=1.) """ def __init__(self, power=1.): self.power = power def __call__(self, p): """ Power transform link function Parameters ---------- p : array_like Mean parameters Returns ------- z : array_like Power transform of x Notes ----- g(p) = x**self.power """ if self.power == 1: return p else: return np.power(p, self.power) def inverse(self, z): """ Inverse of the power transform link function Parameters ---------- `z` : array_like Value of the transformed mean parameters at `p` Returns ------- `p` : ndarray Mean parameters Notes ----- g^(-1)(z`) = `z`**(1/`power`) """ if self.power == 1: return z else: return np.power(z, 1. / self.power) def deriv(self, p): """ Derivative of the power transform Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray Derivative of power transform of `p` Notes ----- g'(`p`) = `power` * `p`**(`power` - 1) """ if self.power == 1: return np.ones_like(p) else: return self.power * np.power(p, self.power - 1) def deriv2(self, p): """ Second derivative of the power transform Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray Second derivative of the power transform of `p` Notes ----- g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2) """ if self.power == 1: return np.zeros_like(p) else: return self.power * (self.power - 1) * np.power(p, self.power - 2) def inverse_deriv(self, z): """ Derivative of the inverse of the power transform Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the power transform function """ if self.power == 1: return np.ones_like(z) else: return np.power(z, (1 - self.power)/self.power) / self.power def inverse_deriv2(self, z): """ Second derivative of the inverse of the power transform Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the power transform function """ if self.power == 1: return np.zeros_like(z) else: return ((1 - self.power) * np.power(z, (1 - 2*self.power)/self.power) / self.power**2) class inverse_power(Power): """ The inverse transform Notes ----- g(p) = 1/p Alias of statsmodels.family.links.Power(power=-1.) """ def __init__(self): super(inverse_power, self).__init__(power=-1.) 
class sqrt(Power): """ The square-root transform Notes ----- g(`p`) = sqrt(`p`) Alias of statsmodels.family.links.Power(power=.5) """ def __init__(self): super(sqrt, self).__init__(power=.5) class inverse_squared(Power): r""" The inverse squared transform Notes ----- g(`p`) = 1/(`p`\*\*2) Alias of statsmodels.family.links.Power(power=2.) """ def __init__(self): super(inverse_squared, self).__init__(power=-2.) class identity(Power): """ The identity transform Notes ----- g(`p`) = `p` Alias of statsmodels.family.links.Power(power=1.) """ def __init__(self): super(identity, self).__init__(power=1.) class Log(Link): """ The log transform Notes ----- call and derivative call a private method _clean to trim the data by machine epsilon so that p is in (0,1). log is an alias of Log. """ def _clean(self, x): return np.clip(x, FLOAT_EPS, np.inf) def __call__(self, p, **extra): """ Log transform link function Parameters ---------- x : array_like Mean parameters Returns ------- z : ndarray log(x) Notes ----- g(p) = log(p) """ x = self._clean(p) return np.log(x) def inverse(self, z): """ Inverse of log transform link function Parameters ---------- z : ndarray The inverse of the link function at `p` Returns ------- p : ndarray The mean probabilities given the value of the inverse `z` Notes ----- g^{-1}(z) = exp(z) """ return np.exp(z) def deriv(self, p): """ Derivative of log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray derivative of log transform of x Notes ----- g'(x) = 1/x """ p = self._clean(p) return 1. / p def deriv2(self, p): """ Second derivative of the log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray Second derivative of log transform of x Notes ----- g''(x) = -1/x^2 """ p = self._clean(p) return -1. / p**2 def inverse_deriv(self, z): """ Derivative of the inverse of the log transform link function Parameters ---------- z : ndarray The inverse of the link function at `p` Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the log function, the exponential function """ return np.exp(z) class log(Log): """ The log transform Notes ----- log is a an alias of Log. """ pass # TODO: the CDFLink is untested class CDFLink(Logit): """ The use the CDF of a scipy.stats distribution CDFLink is a subclass of logit in order to use its _clean method for the link and its derivative. Parameters ---------- dbn : scipy.stats distribution Default is dbn=scipy.stats.norm Notes ----- The CDF link is untested. """ def __init__(self, dbn=scipy.stats.norm): self.dbn = dbn def __call__(self, p): """ CDF link function Parameters ---------- p : array_like Mean parameters Returns ------- z : ndarray (ppf) inverse of CDF transform of p Notes ----- g(`p`) = `dbn`.ppf(`p`) """ p = self._clean(p) return self.dbn.ppf(p) def inverse(self, z): """ The inverse of the CDF link Parameters ---------- z : array_like The value of the inverse of the link function at `p` Returns ------- p : ndarray Mean probabilities. The value of the inverse of CDF link of `z` Notes ----- g^(-1)(`z`) = `dbn`.cdf(`z`) """ return self.dbn.cdf(z) def deriv(self, p): """ Derivative of CDF link Parameters ---------- p : array_like mean parameters Returns ------- g'(p) : ndarray The derivative of CDF transform at `p` Notes ----- g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`)) """ p = self._clean(p) return 1. 
/ self.dbn.pdf(self.dbn.ppf(p)) def deriv2(self, p): """ Second derivative of the link function g''(p) implemented through numerical differentiation """ p = self._clean(p) linpred = self.dbn.ppf(p) return - self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3 def deriv2_numdiff(self, p): """ Second derivative of the link function g''(p) implemented through numerical differentiation """ from statsmodels.tools.numdiff import _approx_fprime_scalar p = np.atleast_1d(p) # Note: special function for norm.ppf does not support complex return _approx_fprime_scalar(p, self.deriv, centered=True) def inverse_deriv(self, z): """ Derivative of the inverse link function Parameters ---------- z : ndarray The inverse of the link function at `p` Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the logit function. This is just the pdf in a CDFLink, """ return self.dbn.pdf(z) # MASKED: inverse_deriv2 function (lines 710-735) class probit(CDFLink): """ The probit (standard normal CDF) transform Notes ----- g(p) = scipy.stats.norm.ppf(p) probit is an alias of CDFLink. """ def inverse_deriv2(self, z): """ Second derivative of the inverse link function This is the derivative of the pdf in a CDFLink """ return - z * self.dbn.pdf(z) def deriv2(self, p): """ Second derivative of the link function g''(p) """ p = self._clean(p) linpred = self.dbn.ppf(p) return linpred / self.dbn.pdf(linpred)**2 class cauchy(CDFLink): """ The Cauchy (standard Cauchy CDF) transform Notes ----- g(p) = scipy.stats.cauchy.ppf(p) cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy """ def __init__(self): super(cauchy, self).__init__(dbn=scipy.stats.cauchy) def deriv2(self, p): """ Second derivative of the Cauchy link function. Parameters ---------- p : array_like Probabilities Returns ------- g''(p) : ndarray Value of the second derivative of Cauchy link function at `p` """ p = self._clean(p) a = np.pi * (p - 0.5) d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3 return d2 def inverse_deriv2(self, z): return - 2 * z / (np.pi * (z**2 + 1)**2) class CLogLog(Logit): """ The complementary log-log transform CLogLog inherits from Logit in order to have access to its _clean method for the link and its derivative. Notes ----- CLogLog is untested. """ def __call__(self, p): """ C-Log-Log transform link function Parameters ---------- p : ndarray Mean parameters Returns ------- z : ndarray The CLogLog transform of `p` Notes ----- g(p) = log(-log(1-p)) """ p = self._clean(p) return np.log(-np.log(1 - p)) def inverse(self, z): """ Inverse of C-Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the CLogLog link function at `p` Returns ------- p : ndarray Mean parameters Notes ----- g^(-1)(`z`) = 1-exp(-exp(`z`)) """ return 1 - np.exp(-np.exp(z)) def deriv(self, p): """ Derivative of C-Log-Log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray The derivative of the CLogLog transform link function Notes ----- g'(p) = - 1 / ((p-1)*log(1-p)) """ p = self._clean(p) return 1. 
/ ((p - 1) * (np.log(1 - p))) def deriv2(self, p): """ Second derivative of the C-Log-Log ink function Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray The second derivative of the CLogLog link function """ p = self._clean(p) fl = np.log(1 - p) d2 = -1 / ((1 - p)**2 * fl) d2 *= 1 + 1 / fl return d2 def inverse_deriv(self, z): """ Derivative of the inverse of the C-Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the CLogLog link function at `p` Returns ------- g^(-1)'(z) : ndarray The derivative of the inverse of the CLogLog link function """ return np.exp(z - np.exp(z)) class cloglog(CLogLog): """ The CLogLog transform link function. Notes ----- g(`p`) = log(-log(1-`p`)) cloglog is an alias for CLogLog cloglog = CLogLog() """ pass class LogLog(Logit): """ The log-log transform LogLog inherits from Logit in order to have access to its _clean method for the link and its derivative. """ def __call__(self, p): """ Log-Log transform link function Parameters ---------- p : ndarray Mean parameters Returns ------- z : ndarray The LogLog transform of `p` Notes ----- g(p) = -log(-log(p)) """ p = self._clean(p) return -np.log(-np.log(p)) def inverse(self, z): """ Inverse of Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the LogLog link function at `p` Returns ------- p : ndarray Mean parameters Notes ----- g^(-1)(`z`) = exp(-exp(-`z`)) """ return np.exp(-np.exp(-z)) def deriv(self, p): """ Derivative of Log-Log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray The derivative of the LogLog transform link function Notes ----- g'(p) = - 1 /(p * log(p)) """ p = self._clean(p) return -1. / (p * (np.log(p))) def deriv2(self, p): """ Second derivative of the Log-Log link function Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray The second derivative of the LogLog link function """ p = self._clean(p) d2 = (1 + np.log(p)) / (p * (np.log(p)))**2 return d2 def inverse_deriv(self, z): """ Derivative of the inverse of the Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the LogLog link function at `p` Returns ------- g^(-1)'(z) : ndarray The derivative of the inverse of the LogLog link function """ return np.exp(-np.exp(-z) - z) def inverse_deriv2(self, z): """ Second derivative of the inverse of the Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the LogLog link function at `p` Returns ------- g^(-1)''(z) : ndarray The second derivative of the inverse of the LogLog link function """ return self.inverse_deriv(z) * (np.exp(-z) - 1) class loglog(LogLog): """ The LogLog transform link function. Notes ----- g(`p`) = -log(-log(`p`)) loglog is an alias for LogLog loglog = LogLog() """ pass class NegativeBinomial(Link): ''' The negative binomial link function Parameters ---------- alpha : float, optional Alpha is the ancillary parameter of the Negative Binomial link function. It is assumed to be nonstochastic. The default value is 1. Permissible values are usually assumed to be in (.01, 2). 
''' def __init__(self, alpha=1.): self.alpha = alpha def _clean(self, x): return np.clip(x, FLOAT_EPS, np.inf) def __call__(self, p): ''' Negative Binomial transform link function Parameters ---------- p : array_like Mean parameters Returns ------- z : ndarray The negative binomial transform of `p` Notes ----- g(p) = log(p/(p + 1/alpha)) ''' p = self._clean(p) return np.log(p/(p + 1/self.alpha)) def inverse(self, z): ''' Inverse of the negative binomial transform Parameters ---------- z : array_like The value of the inverse of the negative binomial link at `p`. Returns ------- p : ndarray Mean parameters Notes ----- g^(-1)(z) = exp(z)/(alpha*(1-exp(z))) ''' return -1/(self.alpha * (1 - np.exp(-z))) def deriv(self, p): ''' Derivative of the negative binomial transform Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray The derivative of the negative binomial transform link function Notes ----- g'(x) = 1/(x+alpha*x^2) ''' return 1/(p + self.alpha * p**2) def deriv2(self, p): ''' Second derivative of the negative binomial link function. Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray The second derivative of the negative binomial transform link function Notes ----- g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2 ''' numer = -(1 + 2 * self.alpha * p) denom = (p + self.alpha * p**2)**2 return numer / denom def inverse_deriv(self, z): ''' Derivative of the inverse of the negative binomial transform Parameters ---------- z : array_like Usually the linear predictor for a GLM or GEE model Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the negative binomial link ''' t = np.exp(z) return t / (self.alpha * (1-t)**2) class nbinom(NegativeBinomial): """ The negative binomial link function. Notes ----- g(p) = log(p/(p + 1/alpha)) nbinom is an alias of NegativeBinomial. nbinom = NegativeBinomial(alpha=1.) """ pass
def inverse_deriv2(self, z): """ Second derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)''(z) : ndarray The value of the second derivative of the inverse of the link function Notes ----- This method should be overwritten by subclasses. The inherited method is implemented through numerical differentiation. """ from statsmodels.tools.numdiff import _approx_fprime_scalar z = np.atleast_1d(z) # Note: special function for norm.ppf does not support complex return _approx_fprime_scalar(z, self.inverse_deriv, centered=True)
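The implementation above falls back to a centered finite difference on inverse_deriv, because a generic scipy.stats distribution has no closed form for the second derivative of its inverse-link chain. Below is a minimal numerical sketch of the same idea, assuming the standard normal (probit) case for concreteness and using a plain central difference instead of statsmodels' internal _approx_fprime_scalar; the analytic comparison value -z * pdf(z) is the closed form that the probit subclass in this record returns.

import numpy as np
from scipy.stats import norm

def inverse_deriv(z):
    # derivative of the inverse probit link, i.e. the standard normal pdf
    return norm.pdf(z)

def inverse_deriv2_numdiff(z, h=1e-5):
    # centered finite difference of inverse_deriv, mirroring the masked method
    return (inverse_deriv(z + h) - inverse_deriv(z - h)) / (2.0 * h)

z = np.linspace(-3.0, 3.0, 7)
numeric = inverse_deriv2_numdiff(z)
analytic = -z * norm.pdf(z)          # closed form used by probit.inverse_deriv2
assert np.allclose(numeric, analytic, atol=1e-8)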
710
735
''' Defines the link functions to be used with GLM and GEE families. ''' import numpy as np import scipy.stats FLOAT_EPS = np.finfo(float).eps class Link(object): """ A generic link function for one-parameter exponential family. `Link` does nothing, but lays out the methods expected of any subclass. """ def __call__(self, p): """ Return the value of the link function. This is just a placeholder. Parameters ---------- p : array_like Probabilities Returns ------- g(p) : array_like The value of the link function g(p) = z """ return NotImplementedError def inverse(self, z): """ Inverse of the link function. Just a placeholder. Parameters ---------- z : array_like `z` is usually the linear predictor of the transformed variable in the IRLS algorithm for GLM. Returns ------- g^(-1)(z) : ndarray The value of the inverse of the link function g^(-1)(z) = p """ return NotImplementedError def deriv(self, p): """ Derivative of the link function g'(p). Just a placeholder. Parameters ---------- p : array_like Returns ------- g'(p) : ndarray The value of the derivative of the link function g'(p) """ return NotImplementedError def deriv2(self, p): """Second derivative of the link function g''(p) implemented through numerical differentiation """ from statsmodels.tools.numdiff import _approx_fprime_cs_scalar return _approx_fprime_cs_scalar(p, self.deriv) def inverse_deriv(self, z): """ Derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : ndarray The value of the derivative of the inverse of the link function Notes ----- This reference implementation gives the correct result but is inefficient, so it can be overridden in subclasses. """ return 1 / self.deriv(self.inverse(z)) def inverse_deriv2(self, z): """ Second derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : ndarray The value of the second derivative of the inverse of the link function Notes ----- This reference implementation gives the correct result but is inefficient, so it can be overridden in subclasses. """ iz = self.inverse(z) return -self.deriv2(iz) / self.deriv(iz)**3 class Logit(Link): """ The logit transform Notes ----- call and derivative use a private method _clean to make trim p by machine epsilon so that p is in (0,1) Alias of Logit: logit = Logit() """ def _clean(self, p): """ Clip logistic values to range (eps, 1-eps) Parameters ---------- p : array_like Probabilities Returns ------- pclip : ndarray Clipped probabilities """ return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS) def __call__(self, p): """ The logit transform Parameters ---------- p : array_like Probabilities Returns ------- z : ndarray Logit transform of `p` Notes ----- g(p) = log(p / (1 - p)) """ p = self._clean(p) return np.log(p / (1. - p)) def inverse(self, z): """ Inverse of the logit transform Parameters ---------- z : array_like The value of the logit transform at `p` Returns ------- p : ndarray Probabilities Notes ----- g^(-1)(z) = exp(z)/(1+exp(z)) """ z = np.asarray(z) t = np.exp(-z) return 1. / (1. + t) def deriv(self, p): """ Derivative of the logit transform Parameters ---------- p : array_like Probabilities Returns ------- g'(p) : ndarray Value of the derivative of logit transform at `p` Notes ----- g'(p) = 1 / (p * (1 - p)) Alias for `Logit`: logit = Logit() """ p = self._clean(p) return 1. 
/ (p * (1 - p)) def inverse_deriv(self, z): """ Derivative of the inverse of the logit transform Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g'^(-1)(z) : ndarray The value of the derivative of the inverse of the logit function """ t = np.exp(z) return t/(1 + t)**2 def deriv2(self, p): """ Second derivative of the logit function. Parameters ---------- p : array_like probabilities Returns ------- g''(z) : ndarray The value of the second derivative of the logit function """ v = p * (1 - p) return (2*p - 1) / v**2 class logit(Logit): pass class Power(Link): """ The power transform Parameters ---------- power : float The exponent of the power transform Notes ----- Aliases of Power: inverse = Power(power=-1) sqrt = Power(power=.5) inverse_squared = Power(power=-2.) identity = Power(power=1.) """ def __init__(self, power=1.): self.power = power def __call__(self, p): """ Power transform link function Parameters ---------- p : array_like Mean parameters Returns ------- z : array_like Power transform of x Notes ----- g(p) = x**self.power """ if self.power == 1: return p else: return np.power(p, self.power) def inverse(self, z): """ Inverse of the power transform link function Parameters ---------- `z` : array_like Value of the transformed mean parameters at `p` Returns ------- `p` : ndarray Mean parameters Notes ----- g^(-1)(z`) = `z`**(1/`power`) """ if self.power == 1: return z else: return np.power(z, 1. / self.power) def deriv(self, p): """ Derivative of the power transform Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray Derivative of power transform of `p` Notes ----- g'(`p`) = `power` * `p`**(`power` - 1) """ if self.power == 1: return np.ones_like(p) else: return self.power * np.power(p, self.power - 1) def deriv2(self, p): """ Second derivative of the power transform Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray Second derivative of the power transform of `p` Notes ----- g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2) """ if self.power == 1: return np.zeros_like(p) else: return self.power * (self.power - 1) * np.power(p, self.power - 2) def inverse_deriv(self, z): """ Derivative of the inverse of the power transform Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the power transform function """ if self.power == 1: return np.ones_like(z) else: return np.power(z, (1 - self.power)/self.power) / self.power def inverse_deriv2(self, z): """ Second derivative of the inverse of the power transform Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the power transform function """ if self.power == 1: return np.zeros_like(z) else: return ((1 - self.power) * np.power(z, (1 - 2*self.power)/self.power) / self.power**2) class inverse_power(Power): """ The inverse transform Notes ----- g(p) = 1/p Alias of statsmodels.family.links.Power(power=-1.) """ def __init__(self): super(inverse_power, self).__init__(power=-1.) 
class sqrt(Power): """ The square-root transform Notes ----- g(`p`) = sqrt(`p`) Alias of statsmodels.family.links.Power(power=.5) """ def __init__(self): super(sqrt, self).__init__(power=.5) class inverse_squared(Power): r""" The inverse squared transform Notes ----- g(`p`) = 1/(`p`\*\*2) Alias of statsmodels.family.links.Power(power=2.) """ def __init__(self): super(inverse_squared, self).__init__(power=-2.) class identity(Power): """ The identity transform Notes ----- g(`p`) = `p` Alias of statsmodels.family.links.Power(power=1.) """ def __init__(self): super(identity, self).__init__(power=1.) class Log(Link): """ The log transform Notes ----- call and derivative call a private method _clean to trim the data by machine epsilon so that p is in (0,1). log is an alias of Log. """ def _clean(self, x): return np.clip(x, FLOAT_EPS, np.inf) def __call__(self, p, **extra): """ Log transform link function Parameters ---------- x : array_like Mean parameters Returns ------- z : ndarray log(x) Notes ----- g(p) = log(p) """ x = self._clean(p) return np.log(x) def inverse(self, z): """ Inverse of log transform link function Parameters ---------- z : ndarray The inverse of the link function at `p` Returns ------- p : ndarray The mean probabilities given the value of the inverse `z` Notes ----- g^{-1}(z) = exp(z) """ return np.exp(z) def deriv(self, p): """ Derivative of log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray derivative of log transform of x Notes ----- g'(x) = 1/x """ p = self._clean(p) return 1. / p def deriv2(self, p): """ Second derivative of the log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray Second derivative of log transform of x Notes ----- g''(x) = -1/x^2 """ p = self._clean(p) return -1. / p**2 def inverse_deriv(self, z): """ Derivative of the inverse of the log transform link function Parameters ---------- z : ndarray The inverse of the link function at `p` Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the log function, the exponential function """ return np.exp(z) class log(Log): """ The log transform Notes ----- log is a an alias of Log. """ pass # TODO: the CDFLink is untested class CDFLink(Logit): """ The use the CDF of a scipy.stats distribution CDFLink is a subclass of logit in order to use its _clean method for the link and its derivative. Parameters ---------- dbn : scipy.stats distribution Default is dbn=scipy.stats.norm Notes ----- The CDF link is untested. """ def __init__(self, dbn=scipy.stats.norm): self.dbn = dbn def __call__(self, p): """ CDF link function Parameters ---------- p : array_like Mean parameters Returns ------- z : ndarray (ppf) inverse of CDF transform of p Notes ----- g(`p`) = `dbn`.ppf(`p`) """ p = self._clean(p) return self.dbn.ppf(p) def inverse(self, z): """ The inverse of the CDF link Parameters ---------- z : array_like The value of the inverse of the link function at `p` Returns ------- p : ndarray Mean probabilities. The value of the inverse of CDF link of `z` Notes ----- g^(-1)(`z`) = `dbn`.cdf(`z`) """ return self.dbn.cdf(z) def deriv(self, p): """ Derivative of CDF link Parameters ---------- p : array_like mean parameters Returns ------- g'(p) : ndarray The derivative of CDF transform at `p` Notes ----- g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`)) """ p = self._clean(p) return 1. 
/ self.dbn.pdf(self.dbn.ppf(p)) def deriv2(self, p): """ Second derivative of the link function g''(p) implemented through numerical differentiation """ p = self._clean(p) linpred = self.dbn.ppf(p) return - self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3 def deriv2_numdiff(self, p): """ Second derivative of the link function g''(p) implemented through numerical differentiation """ from statsmodels.tools.numdiff import _approx_fprime_scalar p = np.atleast_1d(p) # Note: special function for norm.ppf does not support complex return _approx_fprime_scalar(p, self.deriv, centered=True) def inverse_deriv(self, z): """ Derivative of the inverse link function Parameters ---------- z : ndarray The inverse of the link function at `p` Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the logit function. This is just the pdf in a CDFLink, """ return self.dbn.pdf(z) def inverse_deriv2(self, z): """ Second derivative of the inverse link function g^(-1)(z). Parameters ---------- z : array_like `z` is usually the linear predictor for a GLM or GEE model. Returns ------- g^(-1)''(z) : ndarray The value of the second derivative of the inverse of the link function Notes ----- This method should be overwritten by subclasses. The inherited method is implemented through numerical differentiation. """ from statsmodels.tools.numdiff import _approx_fprime_scalar z = np.atleast_1d(z) # Note: special function for norm.ppf does not support complex return _approx_fprime_scalar(z, self.inverse_deriv, centered=True) class probit(CDFLink): """ The probit (standard normal CDF) transform Notes ----- g(p) = scipy.stats.norm.ppf(p) probit is an alias of CDFLink. """ def inverse_deriv2(self, z): """ Second derivative of the inverse link function This is the derivative of the pdf in a CDFLink """ return - z * self.dbn.pdf(z) def deriv2(self, p): """ Second derivative of the link function g''(p) """ p = self._clean(p) linpred = self.dbn.ppf(p) return linpred / self.dbn.pdf(linpred)**2 class cauchy(CDFLink): """ The Cauchy (standard Cauchy CDF) transform Notes ----- g(p) = scipy.stats.cauchy.ppf(p) cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy """ def __init__(self): super(cauchy, self).__init__(dbn=scipy.stats.cauchy) def deriv2(self, p): """ Second derivative of the Cauchy link function. Parameters ---------- p : array_like Probabilities Returns ------- g''(p) : ndarray Value of the second derivative of Cauchy link function at `p` """ p = self._clean(p) a = np.pi * (p - 0.5) d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3 return d2 def inverse_deriv2(self, z): return - 2 * z / (np.pi * (z**2 + 1)**2) class CLogLog(Logit): """ The complementary log-log transform CLogLog inherits from Logit in order to have access to its _clean method for the link and its derivative. Notes ----- CLogLog is untested. 
""" def __call__(self, p): """ C-Log-Log transform link function Parameters ---------- p : ndarray Mean parameters Returns ------- z : ndarray The CLogLog transform of `p` Notes ----- g(p) = log(-log(1-p)) """ p = self._clean(p) return np.log(-np.log(1 - p)) def inverse(self, z): """ Inverse of C-Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the CLogLog link function at `p` Returns ------- p : ndarray Mean parameters Notes ----- g^(-1)(`z`) = 1-exp(-exp(`z`)) """ return 1 - np.exp(-np.exp(z)) def deriv(self, p): """ Derivative of C-Log-Log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray The derivative of the CLogLog transform link function Notes ----- g'(p) = - 1 / ((p-1)*log(1-p)) """ p = self._clean(p) return 1. / ((p - 1) * (np.log(1 - p))) def deriv2(self, p): """ Second derivative of the C-Log-Log ink function Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray The second derivative of the CLogLog link function """ p = self._clean(p) fl = np.log(1 - p) d2 = -1 / ((1 - p)**2 * fl) d2 *= 1 + 1 / fl return d2 def inverse_deriv(self, z): """ Derivative of the inverse of the C-Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the CLogLog link function at `p` Returns ------- g^(-1)'(z) : ndarray The derivative of the inverse of the CLogLog link function """ return np.exp(z - np.exp(z)) class cloglog(CLogLog): """ The CLogLog transform link function. Notes ----- g(`p`) = log(-log(1-`p`)) cloglog is an alias for CLogLog cloglog = CLogLog() """ pass class LogLog(Logit): """ The log-log transform LogLog inherits from Logit in order to have access to its _clean method for the link and its derivative. """ def __call__(self, p): """ Log-Log transform link function Parameters ---------- p : ndarray Mean parameters Returns ------- z : ndarray The LogLog transform of `p` Notes ----- g(p) = -log(-log(p)) """ p = self._clean(p) return -np.log(-np.log(p)) def inverse(self, z): """ Inverse of Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the LogLog link function at `p` Returns ------- p : ndarray Mean parameters Notes ----- g^(-1)(`z`) = exp(-exp(-`z`)) """ return np.exp(-np.exp(-z)) def deriv(self, p): """ Derivative of Log-Log transform link function Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray The derivative of the LogLog transform link function Notes ----- g'(p) = - 1 /(p * log(p)) """ p = self._clean(p) return -1. 
/ (p * (np.log(p))) def deriv2(self, p): """ Second derivative of the Log-Log link function Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray The second derivative of the LogLog link function """ p = self._clean(p) d2 = (1 + np.log(p)) / (p * (np.log(p)))**2 return d2 def inverse_deriv(self, z): """ Derivative of the inverse of the Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the LogLog link function at `p` Returns ------- g^(-1)'(z) : ndarray The derivative of the inverse of the LogLog link function """ return np.exp(-np.exp(-z) - z) def inverse_deriv2(self, z): """ Second derivative of the inverse of the Log-Log transform link function Parameters ---------- z : array_like The value of the inverse of the LogLog link function at `p` Returns ------- g^(-1)''(z) : ndarray The second derivative of the inverse of the LogLog link function """ return self.inverse_deriv(z) * (np.exp(-z) - 1) class loglog(LogLog): """ The LogLog transform link function. Notes ----- g(`p`) = -log(-log(`p`)) loglog is an alias for LogLog loglog = LogLog() """ pass class NegativeBinomial(Link): ''' The negative binomial link function Parameters ---------- alpha : float, optional Alpha is the ancillary parameter of the Negative Binomial link function. It is assumed to be nonstochastic. The default value is 1. Permissible values are usually assumed to be in (.01, 2). ''' def __init__(self, alpha=1.): self.alpha = alpha def _clean(self, x): return np.clip(x, FLOAT_EPS, np.inf) def __call__(self, p): ''' Negative Binomial transform link function Parameters ---------- p : array_like Mean parameters Returns ------- z : ndarray The negative binomial transform of `p` Notes ----- g(p) = log(p/(p + 1/alpha)) ''' p = self._clean(p) return np.log(p/(p + 1/self.alpha)) def inverse(self, z): ''' Inverse of the negative binomial transform Parameters ---------- z : array_like The value of the inverse of the negative binomial link at `p`. Returns ------- p : ndarray Mean parameters Notes ----- g^(-1)(z) = exp(z)/(alpha*(1-exp(z))) ''' return -1/(self.alpha * (1 - np.exp(-z))) def deriv(self, p): ''' Derivative of the negative binomial transform Parameters ---------- p : array_like Mean parameters Returns ------- g'(p) : ndarray The derivative of the negative binomial transform link function Notes ----- g'(x) = 1/(x+alpha*x^2) ''' return 1/(p + self.alpha * p**2) def deriv2(self, p): ''' Second derivative of the negative binomial link function. Parameters ---------- p : array_like Mean parameters Returns ------- g''(p) : ndarray The second derivative of the negative binomial transform link function Notes ----- g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2 ''' numer = -(1 + 2 * self.alpha * p) denom = (p + self.alpha * p**2)**2 return numer / denom def inverse_deriv(self, z): ''' Derivative of the inverse of the negative binomial transform Parameters ---------- z : array_like Usually the linear predictor for a GLM or GEE model Returns ------- g^(-1)'(z) : ndarray The value of the derivative of the inverse of the negative binomial link ''' t = np.exp(z) return t / (self.alpha * (1-t)**2) class nbinom(NegativeBinomial): """ The negative binomial link function. Notes ----- g(p) = log(p/(p + 1/alpha)) nbinom is an alias of NegativeBinomial. nbinom = NegativeBinomial(alpha=1.) """ pass
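The Link base class in this record defines inverse_deriv and inverse_deriv2 through the chain rule, g^(-1)'(z) = 1 / g'(g^(-1)(z)) and g^(-1)''(z) = -g''(p) / g'(p)**3 with p = g^(-1)(z), and notes that subclasses override them with closed forms for efficiency. The numpy-only sketch below is a quick sanity check of those identities using the Logit formulas copied from the record; statsmodels itself is not imported.

import numpy as np

def inverse(z):                      # g^(-1)(z) = 1 / (1 + exp(-z))
    return 1.0 / (1.0 + np.exp(-z))

def deriv(p):                        # g'(p) = 1 / (p * (1 - p))
    return 1.0 / (p * (1.0 - p))

def deriv2(p):                       # g''(p) = (2p - 1) / (p * (1 - p))**2
    return (2.0 * p - 1.0) / (p * (1.0 - p)) ** 2

z = np.linspace(-4.0, 4.0, 9)
p = inverse(z)

# Link.inverse_deriv identity: g^(-1)'(z) = 1 / g'(g^(-1)(z))
assert np.allclose(1.0 / deriv(p), np.exp(z) / (1.0 + np.exp(z)) ** 2)

# Link.inverse_deriv2 identity: g^(-1)''(z) = -g''(p) / g'(p)**3
assert np.allclose(-deriv2(p) / deriv(p) ** 3, p * (1.0 - p) * (1.0 - 2.0 * p))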
_default_hashfunc
Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number.
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. # MASKED: _default_hashfunc function (lines 11-29) # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. """ v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. 
for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x
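A short usage note on the implementation above, assuming _default_hashfunc as defined in this record is in scope: unlike Python's builtin hash() on strings, which is salted per interpreter run, this hash is deterministic across runs and is confined to hashbits bits by the mask, which is what the simhash fingerprint construction relies on.

# Usage sketch; assumes the _default_hashfunc implementation above is in scope.
h = _default_hashfunc("example content", 64)
assert h == _default_hashfunc("example content", 64)   # deterministic, no per-run salt
assert 0 <= h < 2 ** 64                                # confined to hashbits bits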
11
29
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. 
""" v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
__init__
:param data: data that needs to be encoded. :param keyword_weight_pair: maximum number of keyword-weight pairs in the keyword-weight list. :param hash_bit_number: maximum number of bits in the hash code. :param hashfunc: hash function; its first parameter must be the data to encode and its second parameter must be the hash bit number. :param tokenizer_func: tokenizer function; its first parameter must be the content to tokenize and its second parameter must be keyword_weight_pair.
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ # MASKED: __init__ function (lines 54-83) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. """ v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. 
""" if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data)
54
83
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. 
""" v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
is_equal
Determine whether two simhashes are similar. :param another: another simhash. :param limit: the similarity threshold. :return: True if the similarity is greater than limit, otherwise False.
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. 
""" v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint # MASKED: is_equal function (lines 139-161) def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False
139
161
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. 
""" v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
hamming_distance
Compute the Hamming distance, i.e. the total number of bit positions in which two binary numbers differ. :param another: another simhash value. :return: the Hamming distance between the current simhash and the other simhash.
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. 
""" v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False # MASKED: hamming_distance function (lines 163-175) if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result
163
175
# Created by SylvanasSun in 2017.10.17 # !/usr/bin/python # -*- coding: utf-8 -*- import collections import jieba from jieba import analyse # TODO: Change default hash algorithms to the other algorithms of high-performance. def _default_hashfunc(content, hashbits): """ Default hash function is variable-length version of Python's builtin hash. :param content: data that needs to hash. :return: return a decimal number. """ if content == "": return 0 x = ord(content[0]) << 7 m = 1000003 mask = 2 ** hashbits - 1 for c in content: x = ((x * m) ^ ord(c)) & mask x ^= len(content) if x == -1: x = -2 return x # TODO: Change default toknizer to the c/c++ version or other tokenizer of high-performance. def _default_tokenizer_func(content, keyword_weight_pair): """ Default tokenizer function that uses jieba tokenizer. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...]. """ seg_list = jieba.lcut_for_search(content) # Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True) class Simhash(object): """ Class Simhash implements simhash algorithms of the Google for filter duplicate content. Simhash algorithms idea is will reduce the dimension of content and compares the difference of the "Hamming Distance" implements filter duplicate content. About simhash algorithms the more introduction: https://en.wikipedia.org/wiki/SimHash Simhash default tokenizer is jieba (https://github.com/fxsjy/jieba). """ def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None): """ :param data: data that needs to be encode. :param keyword_weight_pair: maximum pair number of the keyword-weight list. :param hash_bit_number: maximum bit number for hashcode. :param hashfunc: hash function,its first parameter must be data that needs to be encode and the second parameter must be hash bit number. :param tokenizer_func: tokenizer function,its first parameter must be content that needs to be tokenizer and the second parameter must be keyword_weight_pair. """ if hashfunc is None: self.hashfunc = _default_hashfunc else: self.hashfunc = hashfunc if tokenizer_func is None: self.tokenizer_func = _default_tokenizer_func else: self.tokenizer_func = tokenizer_func self.hash_bit_number = hash_bit_number self.keyword_weight_pari = keyword_weight_pair if isinstance(data, Simhash): self.hash = data.hash elif isinstance(data, int): self.hash = data else: self.simhash(data) def __str__(self): return str(self.hash) def simhash(self, content): """ Select policies for simhash on the different types of content. """ if content is None: self.hash = -1 return if isinstance(content, str): features = self.tokenizer_func(content, self.keyword_weight_pari) self.hash = self.build_from_features(features) elif isinstance(content, collections.Iterable): self.hash = self.build_from_features(content) elif isinstance(content, int): self.hash = content else: raise Exception("Unsupported parameter type %s" % type(content)) def build_from_features(self, features): """ :param features: a list of (token,weight) tuples or a token -> weight dict, if is a string so it need compute weight (a weight of 1 will be assumed). :return: a decimal digit for the accumulative result of each after handled features-weight pair. 
""" v = [0] * self.hash_bit_number if isinstance(features, dict): features = features.items() # Starting longitudinal accumulation of bits, current bit add current weight # when the current bits equal 1 and else current bit minus the current weight. for f in features: if isinstance(f, str): h = self.hashfunc(f, self.hash_bit_number) w = 1 else: assert isinstance(f, collections.Iterable) h = self.hashfunc(f[0], self.hash_bit_number) w = f[1] for i in range(self.hash_bit_number): bitmask = 1 << i v[i] += w if h & bitmask else -w # Just record weight of the non-negative fingerprint = 0 for i in range(self.hash_bit_number): if v[i] >= 0: fingerprint += 1 << i return fingerprint def is_equal(self, another, limit=0.8): """ Determine two simhash are similar or not similar. :param another: another simhash. :param limit: a limit of the similarity. :return: if similarity greater than limit return true and else return false. """ if another is None: raise Exception("Parameter another is null") if isinstance(another, int): distance = self.hamming_distance(another) elif isinstance(another, Simhash): assert self.hash_bit_number == another.hash_bit_number distance = self.hamming_distance(another.hash) else: raise Exception("Unsupported parameter type %s" % type(another)) similarity = float(self.hash_bit_number - distance) / self.hash_bit_number if similarity > limit: return True return False def hamming_distance(self, another): """ Compute hamming distance,hamming distance is a total number of different bits of two binary numbers. :param another: another simhash value. :return: a hamming distance that current simhash and another simhash. """ x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1) result = 0 while x: result += 1 x &= x - 1 return result if __name__ == "__main__": sentence_A = """ 明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。 东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。 北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。 """ sentence_B = """ 明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。 元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。 建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。 """ sentence_C = "You know nothing Jon Snow!" sentence_D = "Jon Snow: I konw nothing." simhash_A = Simhash(sentence_A) simhash_B = Simhash(sentence_B) simhash_C = Simhash(sentence_C) simhash_D = Simhash(sentence_D) print(simhash_A) print(simhash_B) print(simhash_C) print(simhash_D) assert simhash_A.is_equal(simhash_B) is True assert simhash_B.is_equal(simhash_C) is False assert simhash_C.is_equal(simhash_D) is True
process_20_newsgroups
Process 20 newsgroups into (data, target, metadata) format. Parameters ---------- unpack_dir: path The interim parent directory the dataset files have been unpacked into. extract_dir: str Name of the directory of the unpacked files, relative to unpack_dir. opts: dict, default {"subset": "all", "remove": "('headers', 'footers', 'quotes')"} Options to pass to sklearn.datasets.fetch_20newsgroups. Returns ------- A tuple: (data, target, additional_metadata)
""" Custom dataset processing/generation functions should be added to this file """ import pathlib from sklearn.datasets import fetch_20newsgroups from functools import partial from src import workflow, paths from src.log import logger import src.log.debug from tqdm.auto import tqdm from .. import paths from ..log import logger __all__ = [ 'process_20_newsgroups' ] # MASKED: process_20_newsgroups function (lines 23-58)
def process_20_newsgroups(*, extract_dir='20_newsgroups', metadata=None, unpack_dir=None, opts={"subset":"all", "remove":"('headers', 'footers', 'quotes')"}): """ Process 20 newsgroups into (data, target, metadata) format. Parameters ---------- unpack_dir: path The interim parent directory the dataset files have been unpacked into. extract_dir: str Name of the directory of the unpacked files relative to the unpack_dir. Note that opts: dict default {"subset":"all", "remove"="('headers', 'footers', 'quotes')"} Options to pass to sklearn.datasets.fetch_20newsgroups. Returns ------- A tuple: (data, target, additional_metadata) """ if metadata is None: metadata = {} if unpack_dir is None: unpack_dir = paths['interim_data_path'] else: unpack_dir = pathlib.Path(unpack_dir) data_dir = unpack_dir / f"{extract_dir}" news = fetch_20newsgroups(**opts) metadata['target_names'] = news.target_names return news.data, news.target, metadata
23
58
""" Custom dataset processing/generation functions should be added to this file """ import pathlib from sklearn.datasets import fetch_20newsgroups from functools import partial from src import workflow, paths from src.log import logger import src.log.debug from tqdm.auto import tqdm from .. import paths from ..log import logger __all__ = [ 'process_20_newsgroups' ] def process_20_newsgroups(*, extract_dir='20_newsgroups', metadata=None, unpack_dir=None, opts={"subset":"all", "remove":"('headers', 'footers', 'quotes')"}): """ Process 20 newsgroups into (data, target, metadata) format. Parameters ---------- unpack_dir: path The interim parent directory the dataset files have been unpacked into. extract_dir: str Name of the directory of the unpacked files relative to the unpack_dir. Note that opts: dict default {"subset":"all", "remove"="('headers', 'footers', 'quotes')"} Options to pass to sklearn.datasets.fetch_20newsgroups. Returns ------- A tuple: (data, target, additional_metadata) """ if metadata is None: metadata = {} if unpack_dir is None: unpack_dir = paths['interim_data_path'] else: unpack_dir = pathlib.Path(unpack_dir) data_dir = unpack_dir / f"{extract_dir}" news = fetch_20newsgroups(**opts) metadata['target_names'] = news.target_names return news.data, news.target, metadata
flatten_args
Converts a dictionary of dictionaries and lists into a flat table. Args: args_in: dictionary containing a hierarchy of dictionaries and lists. Leaf values can be strings, bools, or numbers. Returns: A flat dictionary with keys separated by '.' and string values.
# Lint as: python3 # Copyright 2020 The DMLab2D Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Function for flattening dictionary settings.""" import numbers from typing import Mapping, Sequence def _flatten_args(pairs_in, args_out, prefix, visited_stack): """Helper function for flatten_args. See `flatten_args` below for details.""" for key, v in pairs_in: if not isinstance(key, str): raise ValueError('Keys must be strings. %r' % key) flat_key = prefix + '.' + key if prefix else key if v is None: args_out[flat_key] = 'none' elif isinstance(v, str): args_out[flat_key] = v elif isinstance(v, bool): args_out[flat_key] = 'true' if v else 'false' elif isinstance(v, numbers.Number): args_out[flat_key] = str(v) elif isinstance(v, Mapping): if not any(v is entry for entry in visited_stack): _flatten_args(v.items(), args_out, flat_key, visited_stack + [v]) elif isinstance(v, Sequence): if not any(v is entry for entry in visited_stack): _flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out, flat_key, visited_stack + [v]) else: raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format( flat_key, str(type(v)))) # MASKED: flatten_args function (lines 50-62)
def flatten_args(args_in): """Converts a dictionary of dictionarys and lists into a flat table. Args: args_in: dictionary containing a hierachy of dictionaries and lists. Leaf values can be strings, bools, numbers.. Returns: A flat dictionary with keys separated by '.' and string values. """ args_out = {} _flatten_args(args_in.items(), args_out, None, [args_in]) return args_out
50
62
# Lint as: python3 # Copyright 2020 The DMLab2D Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Function for flattening dictionary settings.""" import numbers from typing import Mapping, Sequence def _flatten_args(pairs_in, args_out, prefix, visited_stack): """Helper function for flatten_args. See `flatten_args` below for details.""" for key, v in pairs_in: if not isinstance(key, str): raise ValueError('Keys must be strings. %r' % key) flat_key = prefix + '.' + key if prefix else key if v is None: args_out[flat_key] = 'none' elif isinstance(v, str): args_out[flat_key] = v elif isinstance(v, bool): args_out[flat_key] = 'true' if v else 'false' elif isinstance(v, numbers.Number): args_out[flat_key] = str(v) elif isinstance(v, Mapping): if not any(v is entry for entry in visited_stack): _flatten_args(v.items(), args_out, flat_key, visited_stack + [v]) elif isinstance(v, Sequence): if not any(v is entry for entry in visited_stack): _flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out, flat_key, visited_stack + [v]) else: raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format( flat_key, str(type(v)))) def flatten_args(args_in): """Converts a dictionary of dictionarys and lists into a flat table. Args: args_in: dictionary containing a hierachy of dictionaries and lists. Leaf values can be strings, bools, numbers.. Returns: A flat dictionary with keys separated by '.' and string values. """ args_out = {} _flatten_args(args_in.items(), args_out, None, [args_in]) return args_out
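To show what the flattening above produces, the sketch below runs flatten_args on a small nested settings dict (values are made up); note how booleans become 'true'/'false' strings and list entries get 1-based indices.

# Illustrative only; assumes flatten_args (defined above) is in scope.
settings = {
    "env": {"name": "maze", "size": [11, 11]},
    "render": True,
    "seed": 42,
}
print(flatten_args(settings))
# Expected: {'env.name': 'maze', 'env.size.1': '11', 'env.size.2': '11',
#            'render': 'true', 'seed': '42'}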
__init__
Convolutional model :param kwargs: window_size: int stride_size: int test_percentage: float n_features: int n_outputs: int
from random import shuffle from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore from tensorflow.keras.models import Sequential # type: ignore import numpy as np from utils.Recording import Recording from utils.array_operations import split_list_by_percentage from utils.typing import assert_type class ConvModel(RainbowModelLeaveRecsOut): # MASKED: __init__ function (lines 13-34) def __create_model(self, n_features, n_outputs): # window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1] print( f"Building model for {self.window_size} timesteps (window_size) and {n_features} features" ) model = Sequential() model.add( Conv1D( filters=64, kernel_size=3, activation="relu", input_shape=(self.window_size, n_features), ) ) model.add(Conv1D(filters=64, kernel_size=3, activation="relu")) model.add(Dropout(0.5)) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(Dense(100, activation="relu")) model.add(Dense(n_outputs, activation="softmax")) model.compile( loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) return model
def __init__(self, **kwargs): """ Convolutional model :param kwargs: window_size: int stride_size: int test_percentage: float n_features: int n_outputs: int """ # hyper params to instance vars self.window_size = kwargs["window_size"] self.stride_size = kwargs["stride_size"] self.test_percentage = kwargs["test_percentage"] self.verbose = 0 self.epochs = 10 self.batch_size = 32 # create model self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
13
34
from random import shuffle from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore from tensorflow.keras.models import Sequential # type: ignore import numpy as np from utils.Recording import Recording from utils.array_operations import split_list_by_percentage from utils.typing import assert_type class ConvModel(RainbowModelLeaveRecsOut): def __init__(self, **kwargs): """ Convolutional model :param kwargs: window_size: int stride_size: int test_percentage: float n_features: int n_outputs: int """ # hyper params to instance vars self.window_size = kwargs["window_size"] self.stride_size = kwargs["stride_size"] self.test_percentage = kwargs["test_percentage"] self.verbose = 0 self.epochs = 10 self.batch_size = 32 # create model self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"]) def __create_model(self, n_features, n_outputs): # window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1] print( f"Building model for {self.window_size} timesteps (window_size) and {n_features} features" ) model = Sequential() model.add( Conv1D( filters=64, kernel_size=3, activation="relu", input_shape=(self.window_size, n_features), ) ) model.add(Conv1D(filters=64, kernel_size=3, activation="relu")) model.add(Dropout(0.5)) model.add(MaxPooling1D(pool_size=2)) model.add(Flatten()) model.add(Dense(100, activation="relu")) model.add(Dense(n_outputs, activation="softmax")) model.compile( loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) return model
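A brief, hedged instantiation sketch for the ConvModel above: the class and its RainbowModelLeaveRecsOut parent are project-specific, TensorFlow/Keras must be installed, and all parameter values below are illustrative rather than recommended settings.

# Sketch only; parameter values are hypothetical.
model = ConvModel(
    window_size=100,       # timesteps per sliding window
    stride_size=50,
    test_percentage=0.2,
    n_features=6,          # e.g. 3-axis accelerometer + 3-axis gyroscope
    n_outputs=5,           # number of activity classes
)
model.model.summary()      # inspect the underlying Keras Sequential network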
pol2cart
Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi)
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. # MASKED: pol2cart function (lines 75-96) def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. 
Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
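The pixel-coordinate helpers at the end of this masked file map [0, size - 1] to [-1, 1] through the factor 2 / (size - 1). Below is a minimal standalone sketch of that mapping and its inverse; it uses plain torch and hypothetical helper names (normalize_px, denormalize_px) rather than the kornia API, so treat it as an illustration of the formula only.

import torch

def normalize_px(coords: torch.Tensor, height: int, width: int,
                 eps: float = 1e-8) -> torch.Tensor:
    # map x in [0, width - 1] and y in [0, height - 1] to [-1, 1]
    hw = torch.tensor([width, height], dtype=coords.dtype, device=coords.device)
    factor = 2.0 / (hw - 1).clamp(min=eps)
    return factor * coords - 1.0

def denormalize_px(coords: torch.Tensor, height: int, width: int,
                   eps: float = 1e-8) -> torch.Tensor:
    # inverse mapping: [-1, 1] back to pixel units
    hw = torch.tensor([width, height], dtype=coords.dtype, device=coords.device)
    factor = 2.0 / (hw - 1).clamp(min=eps)
    return (coords + 1.0) / factor

pts = torch.tensor([[0.0, 0.0], [639.0, 479.0]])  # corners of a 640x480 image
roundtrip = denormalize_px(normalize_px(pts, height=480, width=640), height=480, width=640)
assert torch.allclose(pts, roundtrip)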
def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y
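A small usage sketch for this implementation; it assumes the kornia version that ships this file, where both pol2cart and its companion cart2pol are importable from kornia.geometry.conversions. The round trip is only approximate because cart2pol adds eps under the square root.

import torch
from kornia.geometry.conversions import cart2pol, pol2cart  # assumed import path

x = torch.tensor([[3.0, 0.0]])
y = torch.tensor([[4.0, 2.0]])

rho, phi = cart2pol(x, y)    # rho ~ sqrt(x**2 + y**2 + eps), phi = atan2(y, x)
x2, y2 = pol2cart(rho, phi)  # x = rho * cos(phi), y = rho * sin(phi)

assert torch.allclose(x, x2, atol=1e-3)
assert torch.allclose(y, y2, atol=1e-3)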
75
96
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
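All quaternion helpers in this file use the (x, y, z, w) layout. As a cross-check of the quaternion-to-matrix expressions used above, here is a compact standalone torch sketch (the function name quat_to_matrix is mine, not kornia's); for q = (0, 0, 1, 0) it reproduces the diag(-1, -1, 1) result quoted in the quaternion_to_rotation_matrix docstring.

import torch

def quat_to_matrix(q: torch.Tensor) -> torch.Tensor:
    # q is assumed to be a unit quaternion in (x, y, z, w) order, shape (4,)
    x, y, z, w = q.tolist()
    return torch.tensor([
        [1 - 2 * (y * y + z * z), 2 * (x * y - z * w),     2 * (x * z + y * w)],
        [2 * (x * y + z * w),     1 - 2 * (x * x + z * z), 2 * (y * z - x * w)],
        [2 * (x * z - y * w),     2 * (y * z + x * w),     1 - 2 * (x * x + y * y)],
    ])

print(quat_to_matrix(torch.tensor([0.0, 0.0, 1.0, 0.0])))  # -> diag(-1, -1, 1)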
cart2pol
Function that converts cartesian coordinates to polar coordinates. Args: x (torch.Tensor): Tensor of arbitrary shape. y (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y)
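Based on this docstring and the copies of cart2pol visible in the surrounding file dumps, a sketch of the masked implementation looks like the following; the eps term under the square root keeps the gradient finite at the origin.

from typing import Tuple
import torch

def cart2pol(x: torch.Tensor, y: torch.Tensor,
             eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]:
    if not (isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor)):
        raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
            type(x), type(y)))
    rho = torch.sqrt(x ** 2 + y ** 2 + eps)  # eps keeps sqrt differentiable at (0, 0)
    phi = torch.atan2(y, x)
    return rho, phi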
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y # MASKED: cart2pol function (lines 99-121) def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. 
Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
def cart2pol(x: torch.Tensor, y: torch.Tensor,
             eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]:
    """Function that converts cartesian coordinates to polar coordinates.

    Args:
        x (torch.Tensor): Tensor of arbitrary shape.
        y (torch.Tensor): Tensor of same arbitrary shape.
        eps (float): To avoid division by zero. Default is 1e-8.

    Returns:
        torch.Tensor, torch.Tensor: Tensors with same shape as input.

    Example:
        >>> x = torch.rand(1, 3, 3)
        >>> y = torch.rand(1, 3, 3)
        >>> rho, phi = cart2pol(x, y)
    """
    if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)):
        raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
            type(x), type(y)))

    rho = torch.sqrt(x**2 + y**2 + eps)
    phi = torch.atan2(y, x)
    return rho, phi
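A minimal round-trip sketch for the cart2pol implementation above, paired with pol2cart from the same module; the tolerance is an illustrative choice (the eps term under the square root makes the round trip approximate), not part of the source.

# Round trip: cartesian -> polar -> cartesian, using cart2pol/pol2cart above.
import torch

x = torch.rand(1, 3, 3)
y = torch.rand(1, 3, 3)

rho, phi = cart2pol(x, y)          # rho = sqrt(x^2 + y^2 + eps), phi = atan2(y, x)
x_rec, y_rec = pol2cart(rho, phi)  # x = rho * cos(phi), y = rho * sin(phi)

# Loose tolerance because eps slightly inflates rho (illustrative assumption).
assert torch.allclose(x, x_rec, atol=1e-3)
assert torch.allclose(y, y_rec, atol=1e-3)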
99
121
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
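A hedged usage sketch for the pixel-coordinate helpers that close the file above. The grid construction and tolerance are illustrative assumptions, not taken from the source; only normalize_pixel_coordinates and denormalize_pixel_coordinates come from the module.

# Build an HxWx2 grid of (x, y) pixel coordinates, map it into [-1, 1] and back.
import torch

height, width = 4, 6
ys, xs = torch.meshgrid(torch.arange(height), torch.arange(width))  # 'ij' layout
grid = torch.stack([xs, ys], dim=-1).float()  # (H, W, 2), x component first

grid_norm = normalize_pixel_coordinates(grid, height, width)       # values in [-1, 1]
grid_back = denormalize_pixel_coordinates(grid_norm, height, width)

# The two mappings are exact inverses up to floating-point error.
assert torch.allclose(grid, grid_back, atol=1e-4)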
rotation_matrix_to_angle_axis
Convert 3x3 rotation matrix to Rodrigues vector.

Args:
    rotation_matrix (torch.Tensor): rotation matrix.

Returns:
    torch.Tensor: Rodrigues vector transformation.

Shape:
    - Input: :math:`(N, 3, 3)`
    - Output: :math:`(N, 3)`

Example:
    >>> input = torch.rand(2, 3, 3)  # Nx3x3
    >>> output = rotation_matrix_to_angle_axis(input)  # Nx3
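The doctest above draws torch.rand(2, 3, 3), which is generally not an orthonormal rotation matrix. Below is a hedged sketch that first builds valid rotations from angle-axis vectors via angle_axis_to_rotation_matrix from the same module before converting; the orthonormality check and tolerance are illustrative assumptions.

# Build genuine rotation matrices, then convert them to Rodrigues vectors.
import torch

angle_axis = torch.rand(2, 3)                         # Nx3 Rodrigues vectors
rotation = angle_axis_to_rotation_matrix(angle_axis)  # Nx3x3

# Sanity check: the matrices are (approximately) orthonormal, R @ R^T ~ I.
identity = torch.eye(3).expand(2, 3, 3)
assert torch.allclose(rotation @ rotation.transpose(-1, -2), identity, atol=1e-4)

recovered = rotation_matrix_to_angle_axis(rotation)   # Nx3
print(recovered.shape)  # torch.Size([2, 3])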
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 # MASKED: rotation_matrix_to_angle_axis function (lines 295-322) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. 
qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). 
Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
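The normalize_pixel_coordinates / denormalize_pixel_coordinates pair shown above maps pixel indices in [0, w-1] x [0, h-1] to the [-1, 1] range used by grid sampling, and back. Below is a minimal self-contained sketch of that mapping; normalize_px is a hypothetical helper written only for illustration and is not the library API:

import torch

def normalize_px(coords: torch.Tensor, height: int, width: int) -> torch.Tensor:
    # coords[..., 0] is x (width axis), coords[..., 1] is y (height axis)
    hw = torch.tensor([width - 1, height - 1], dtype=coords.dtype, device=coords.device)
    # x in [0, w-1] maps to 2*x/(w-1) - 1 in [-1, 1]
    return 2.0 * coords / hw.clamp(min=1) - 1.0

grid = torch.tensor([[0.0, 0.0], [639.0, 479.0]])   # the two extreme corners of a 640x480 image
print(normalize_px(grid, height=480, width=640))    # -> [[-1., -1.], [ 1.,  1.]]

Denormalizing simply inverts this affine map, i.e. x = (x_n + 1) * (w - 1) / 2.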
def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion)
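The implementation above recovers the Rodrigues vector by going through the quaternion representation. For intuition, here is a minimal self-contained sketch of the underlying Rodrigues formula that the inverse conversion (angle_axis_to_rotation_matrix in this file) is based on; rodrigues is a hypothetical name used only for this illustration:

import math
import torch

def rodrigues(axis_angle: torch.Tensor) -> torch.Tensor:
    # R = I + sin(theta) * K + (1 - cos(theta)) * K^2, with K the skew matrix of the unit axis
    theta = float(axis_angle.norm(p=2))
    kx, ky, kz = (axis_angle / max(theta, 1e-8)).tolist()
    K = torch.tensor([[0., -kz,  ky],
                      [kz,  0., -kx],
                      [-ky, kx,  0.]])
    return torch.eye(3) + math.sin(theta) * K + (1.0 - math.cos(theta)) * (K @ K)

print(rodrigues(torch.tensor([0., 0., math.pi / 2])))  # ~[[0, -1, 0], [1, 0, 0], [0, 0, 1]]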
295
322
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
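The file above also defines quaternion_log_to_exp and quaternion_exp_to_log. The sketch below re-derives the same two maps with hypothetical helper names (log_to_exp / exp_to_log) to show that they invert each other for log quaternions with norm below pi; it is an illustration of the formulas in the text, not the library code itself:

import torch

def log_to_exp(v: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # v has shape (*, 3); output is a unit quaternion in (x, y, z, w) format
    n = v.norm(p=2, dim=-1, keepdim=True).clamp(min=eps)
    return torch.cat([v * torch.sin(n) / n, torch.cos(n)], dim=-1)

def exp_to_log(q: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # q has shape (*, 4) in (x, y, z, w) format; output has shape (*, 3)
    v, w = q[..., :3], q[..., 3:4]
    n = v.norm(p=2, dim=-1, keepdim=True).clamp(min=eps)
    return v * torch.acos(w.clamp(-1.0, 1.0)) / n

v = torch.tensor([[0.1, -0.2, 0.3]])
print(torch.allclose(exp_to_log(log_to_exp(v)), v, atol=1e-5))  # True for 0 < ||v|| < pi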
normalize_quaternion
Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (float): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000])
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion # MASKED: normalize_quaternion function (lines 409-436) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) 
matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. 
Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). 
" "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) 
/ factor * (pixel_coordinates + 1)
def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (float): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps)
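A short hedged usage sketch for the implementation above, assuming the function is importable from the conversions module this file corresponds to (e.g. kornia.geometry.conversions); it normalizes a batch of quaternions and checks that each result has unit L2 norm:

import torch
from kornia.geometry.conversions import normalize_quaternion  # assumed import path

q = torch.tensor([[1., 0., 1., 0.],
                  [2., 0., 0., 0.]])
q_unit = normalize_quaternion(q)    # same shape (2, 4), rows rescaled to unit norm
print(q_unit.norm(p=2, dim=-1))     # ~tensor([1., 1.])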
409
436
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
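The file above defines both directions of the quaternion/rotation-matrix conversion, using (x, y, z, w) component order throughout. A small round-trip check is a convenient way to see that convention in action; this is a minimal sketch, and the import path kornia.geometry.conversions (as in the upstream project) is an assumption here.

import torch
# Assumed import path; the functions are the ones defined in the block above.
from kornia.geometry.conversions import (
    quaternion_to_rotation_matrix, rotation_matrix_to_quaternion)

# 90-degree rotation about z, written as (x, y, z, w).
q = torch.tensor([0., 0., 0.7071, 0.7071])
R = quaternion_to_rotation_matrix(q)        # 3x3, approximately [[0,-1,0],[1,0,0],[0,0,1]]
q_back = rotation_matrix_to_quaternion(R)   # recovers q up to numerical error
assert torch.allclose(q_back, q, atol=1e-4)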
quaternion_log_to_exp
Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.])
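As a quick sanity check of the docstring above: the implementation treats the norm of the log quaternion as half the rotation angle, so the exponential map of [pi/4, 0, 0] should be the unit quaternion of a 90-degree rotation about the x-axis. A minimal sketch follows; the import path is an assumption, the function is the one documented here.

import math
import torch
from kornia.geometry.conversions import quaternion_log_to_exp  # assumed path

log_q = torch.tensor([math.pi / 4, 0., 0.])     # norm = pi/4 -> rotation angle pi/2
q = quaternion_log_to_exp(log_q)                # output in (x, y, z, w) order
expected = torch.tensor([math.sin(math.pi / 4), 0., 0., math.cos(math.pi / 4)])
assert torch.allclose(q, expected, atol=1e-6)   # ~ (0.7071, 0, 0, 0.7071)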
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis # MASKED: quaternion_log_to_exp function (lines 552-588) def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). 
Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. 
The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
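The masked file above also carries the pixel-coordinate helpers, which map (x, y) pixels into a [-1, 1] grid convention (the extreme left pixel maps to -1, the extreme right to 1). A brief sketch of the round trip for the image corners, with the import path assumed as in upstream kornia:

import torch
from kornia.geometry.conversions import (   # assumed path
    normalize_pixel_coordinates, denormalize_pixel_coordinates)

H, W = 4, 8
corners = torch.tensor([[0., 0.], [W - 1., H - 1.]])           # (x, y) pixels
norm = normalize_pixel_coordinates(corners, height=H, width=W)
# top-left -> (-1, -1), bottom-right -> (1, 1)
assert torch.allclose(norm, torch.tensor([[-1., -1.], [1., 1.]]), atol=1e-6)
back = denormalize_pixel_coordinates(norm, height=H, width=W)  # inverts the mapping
assert torch.allclose(back, corners, atol=1e-4)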
def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp
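A complementary check for the implementation above: composing it with quaternion_exp_to_log, defined in the same module, should recover the original log quaternion whenever its norm lies in (0, pi). A minimal sketch, again assuming the upstream import path:

import torch
from kornia.geometry.conversions import (   # assumed path
    quaternion_log_to_exp, quaternion_exp_to_log)

log_q = torch.tensor([0.1, -0.2, 0.3])      # norm ~ 0.374, well inside (0, pi)
q = quaternion_log_to_exp(log_q)            # unit quaternion in (x, y, z, w)
log_back = quaternion_exp_to_log(q)         # log map undoes the exp map here
assert torch.allclose(log_back, log_q, atol=1e-6)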
552
588
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
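As a quick sanity check on the conversion chain defined above (an illustrative sketch only, not part of the dataset sample; the `kornia.geometry.conversions` import path is an assumption), the angle-axis -> quaternion -> angle-axis round trip is the identity for rotation angles below pi:

import torch
from kornia.geometry.conversions import (
    angle_axis_to_quaternion, quaternion_to_angle_axis)

# a batch of two angle-axis vectors; the vector norm encodes the rotation angle
angle_axis = torch.tensor([[0.0, 0.0, 1.5708],
                           [0.1, -0.2, 0.3]])

quaternion = angle_axis_to_quaternion(angle_axis)  # shape (2, 4)
recovered = quaternion_to_angle_axis(quaternion)   # shape (2, 3)

# both rotations have an angle below pi, so the round trip recovers the input
assert torch.allclose(angle_axis, recovered, atol=1e-5)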
quaternion_exp_to_log
Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format.

Args:
    quaternion (torch.Tensor): a tensor containing a quaternion to be
        converted. The tensor can be of shape :math:`(*, 4)`.

Return:
    torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.

Example:
    >>> quaternion = torch.tensor([0., 0., 0., 1.])
    >>> quaternion_exp_to_log(quaternion)
    tensor([0., 0., 0.])
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp # MASKED: quaternion_exp_to_log function (lines 591-627) # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). 
" "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) 
/ factor * (pixel_coordinates + 1)
def quaternion_exp_to_log(quaternion: torch.Tensor,
                          eps: float = 1e-8) -> torch.Tensor:
    r"""Applies the log map to a quaternion.

    The quaternion should be in (x, y, z, w) format.

    Args:
        quaternion (torch.Tensor): a tensor containing a quaternion to be
            converted. The tensor can be of shape :math:`(*, 4)`.

    Return:
        torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.

    Example:
        >>> quaternion = torch.tensor([0., 0., 0., 1.])
        >>> quaternion_exp_to_log(quaternion)
        tensor([0., 0., 0.])
    """
    if not isinstance(quaternion, torch.Tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))

    if not quaternion.shape[-1] == 4:
        raise ValueError(
            "Input must be a tensor of shape (*, 4). Got {}".format(
                quaternion.shape))

    # unpack quaternion vector and scalar
    quaternion_vector: torch.Tensor = quaternion[..., 0:3]
    quaternion_scalar: torch.Tensor = quaternion[..., 3:4]

    # compute quaternion norm
    norm_q: torch.Tensor = torch.norm(
        quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps)

    # apply log map
    quaternion_log: torch.Tensor = quaternion_vector * torch.acos(
        torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q

    return quaternion_log
591
627
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
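Illustrative note (not part of the dataset row): the file content above defines quaternion_log_to_exp and quaternion_exp_to_log as inverse maps for rotations with |v| < pi. A minimal round-trip sketch follows; the kornia.geometry.conversions import path is an assumption, since the row does not name the module.

import torch
# Assumption: the module shown in this row is kornia.geometry.conversions;
# adjust the import if the file lives elsewhere.
from kornia.geometry.conversions import (
    quaternion_exp_to_log, quaternion_log_to_exp)

# A log-quaternion is a pure 3-vector; exp maps it to a unit quaternion in
# (x, y, z, w) order with w = cos(|v|), and log inverts that for |v| < pi.
q_log = torch.tensor([[0.0, 0.0, 0.0],
                      [0.1, -0.2, 0.3]])
q_exp = quaternion_log_to_exp(q_log)   # shape (2, 4)
q_back = quaternion_exp_to_log(q_exp)  # shape (2, 3)
assert torch.allclose(q_back, q_log, atol=1e-4)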
normalize_pixel_coordinates
Normalize pixel coordinates between -1 and 1.

Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).

Args:
    pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
      Shape can be :math:`(*, 2)`.
    width (int): the maximum width in the x-axis.
    height (int): the maximum height in the y-axis.
    eps (float): safe division by zero. (default 1e-8).

Return:
    torch.Tensor: the normalized pixel coordinates.
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 # MASKED: normalize_pixel_coordinates function (lines 689-720) def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. 
eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
def normalize_pixel_coordinates(
        pixel_coordinates: torch.Tensor,
        height: int,
        width: int,
        eps: float = 1e-8) -> torch.Tensor:
    r"""Normalize pixel coordinates between -1 and 1.

    Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).

    Args:
        pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
          Shape can be :math:`(*, 2)`.
        width (int): the maximum width in the x-axis.
        height (int): the maximum height in the y-axis.
        eps (float): safe division by zero. (default 1e-8).

    Return:
        torch.Tensor: the normalized pixel coordinates.
    """
    if pixel_coordinates.shape[-1] != 2:
        raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
                         "Got {}".format(pixel_coordinates.shape))

    # compute normalization factor
    hw: torch.Tensor = torch.stack([
        torch.tensor(width, device=pixel_coordinates.device,
                     dtype=pixel_coordinates.dtype),
        torch.tensor(height, device=pixel_coordinates.device,
                     dtype=pixel_coordinates.dtype)
    ])

    factor: torch.Tensor = torch.tensor(
        2., device=pixel_coordinates.device,
        dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps)

    return factor * pixel_coordinates - 1
689
720
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
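Illustrative note (not part of the dataset row): a usage sketch for the normalize/denormalize pair documented in this row. The import path is an assumption; the (x, y) channel order follows the [width, height] stacking in the code above.

import torch
# Assumption: the functions live in kornia.geometry.conversions.
from kornia.geometry.conversions import (
    denormalize_pixel_coordinates, normalize_pixel_coordinates)

h, w = 4, 6
# build an (h, w, 2) grid of pixel coordinates in (x, y) order,
# matching the [width, height] stacking used by the implementation
xs = torch.arange(w, dtype=torch.float32).repeat(h, 1)
ys = torch.arange(h, dtype=torch.float32).unsqueeze(1).repeat(1, w)
pix = torch.stack([xs, ys], dim=-1)

norm = normalize_pixel_coordinates(pix, height=h, width=w)   # values in [-1, 1]
back = denormalize_pixel_coordinates(norm, height=h, width=w)

assert torch.allclose(norm.min(), torch.tensor(-1.0))
assert torch.allclose(norm.max(), torch.tensor(1.0))
assert torch.allclose(back, pix, atol=1e-4)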
denormalize_pixel_coordinates
Denormalize pixel coordinates.

The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1).

Args:
    pixel_coordinates (torch.Tensor): the normalized grid coordinates.
      Shape can be :math:`(*, 2)`.
    width (int): the maximum width in the x-axis.
    height (int): the maximum height in the y-axis.
    eps (float): safe division by zero. (default 1e-8).

Return:
    torch.Tensor: the denormalized pixel coordinates.
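Quick hand check (illustrative only): the denormalize_pixel_coordinates code shown in the file content above reduces, per axis and ignoring the eps clamp, to x_pix = (x_norm + 1) * (w - 1) / 2.

# hand check of x_pix = (x_norm + 1) * (w - 1) / 2 for w = 5:
# -1 maps to pixel 0, 0 to the centre pixel 2, +1 to pixel w - 1 = 4
w = 5
for x_norm, expected in [(-1.0, 0.0), (0.0, 2.0), (1.0, 4.0)]:
    x_pix = (x_norm + 1.0) * (w - 1) / 2.0
    assert x_pix == expected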
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 # MASKED: denormalize_pixel_coordinates function (lines 723-753) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. 
height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
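The implementation above inverts the mapping applied by normalize_pixel_coordinates earlier in the same file: normalization scales each axis by 2 / (size - 1) and shifts it so that 0 maps to -1 and size - 1 maps to +1, and denormalization undoes exactly that. A minimal round-trip sketch, assuming both functions are importable from kornia.geometry.conversions (the module path and the image size below are my assumptions, not part of this row):

import torch

from kornia.geometry.conversions import (
    denormalize_pixel_coordinates,
    normalize_pixel_coordinates,
)

height, width = 4, 6  # hypothetical image size

# Build an (H, W, 2) grid of integer pixel coordinates ordered as (x, y).
xs = torch.arange(width, dtype=torch.float32).repeat(height, 1)            # (H, W)
ys = torch.arange(height, dtype=torch.float32).unsqueeze(1).repeat(1, width)  # (H, W)
pixel_grid = torch.stack([xs, ys], dim=-1)                                  # (H, W, 2)

# Map pixels into [-1, 1]: x = 0 -> -1 and x = width - 1 -> +1.
normalized = normalize_pixel_coordinates(pixel_grid, height, width)

# The masked function maps them back to pixel units.
recovered = denormalize_pixel_coordinates(normalized, height, width)
assert torch.allclose(recovered, pixel_grid, atol=1e-5)

The assertion holds because denormalization computes (coords + 1) * (size - 1) / 2, which is the exact inverse of 2 * coords / (size - 1) - 1 up to floating-point rounding.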
723
753
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
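To make the relationship between this row's fields explicit, here is a small validation sketch. It assumes the schema listed at the top of the dataset (function_name, docstring, masked_code, implementation, start_line, end_line, file_content), that start_line and end_line are 1-indexed and inclusive, and that a row is available as a plain Python dict; the helper name check_row and the access pattern are my own illustration, not part of the dataset.

from typing import Dict


def check_row(row: Dict[str, object]) -> None:
    """Check that file_content lines start_line..end_line match the implementation field."""
    file_lines = str(row["file_content"]).splitlines()
    start = int(row["start_line"])  # 1-indexed (assumption); 723 for this row
    end = int(row["end_line"])      # inclusive (assumption); 753 for this row
    span = "\n".join(file_lines[start - 1:end])
    assert span.strip() == str(row["implementation"]).strip()

    # The masked variant should carry the placeholder comment instead of the body.
    assert "# MASKED:" in str(row["masked_code"])
    assert str(row["implementation"]).strip() not in str(row["masked_code"])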
normalize_pixel_coordinates3d
Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates.
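This docstring describes the same per-axis rule as the 2D case: a coordinate u on an axis of size n maps to 2u / (n - 1) - 1, so 0 goes to -1 and n - 1 goes to +1. A small numeric sketch of that rule (my own illustration, independent of the kornia code; it ignores the eps clamp, which only matters when n = 1):

import torch


def normalize_axis(u: torch.Tensor, size: int) -> torch.Tensor:
    # Map [0, size - 1] onto [-1, 1] along one axis.
    return 2.0 * u / (size - 1) - 1.0


width = 9
x = torch.tensor([0.0, 4.0, 8.0])
print(normalize_axis(x, width))  # tensor([-1.,  0.,  1.])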
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) # MASKED: normalize_pixel_coordinates3d function (lines 756-787) def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. 
width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1
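A minimal usage sketch for the implementation above, assuming the function is in scope as defined here; the sizes (D=5, H=4, W=6) and the coordinate are illustrative, and the component order follows the [depth, width, height] stacking used inside the function.

import torch

# far-corner voxel of a D=5, H=4, W=6 volume; components ordered to match
# the (depth - 1, width - 1, height - 1) scaling applied by the function
coords = torch.tensor([[4., 5., 3.]])

norm = normalize_pixel_coordinates3d(coords, depth=5, height=4, width=6)
# the corner maps to +1 along every axis; the origin voxel would map to -1
print(norm)  # ~tensor([[1., 1., 1.]])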
756
787
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
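The module reproduced above chains several rotation parameterizations; a small sketch of two of them, assuming the functions are importable from this module (values are approximate because of the eps guard in the division).

import math
import torch

# a 90-degree rotation about the z-axis as an axis-angle vector (Nx3)
aa = torch.tensor([[0., 0., math.pi / 2]])
R = angle_axis_to_rotation_matrix(aa)
# R is 1x3x3, approximately [[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]

# the identity rotation corresponds to the unit quaternion in (x, y, z, w) form
q = rotation_matrix_to_quaternion(torch.eye(3)[None])
# q is tensor([[0., 0., 0., 1.]])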
denormalize_pixel_coordinates3d
Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates.
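Concretely, the (size - 1)/2 scaling used by these functions maps each component back as :math:`x_{pix} = (x_{norm} + 1)\,(s - 1) / 2`, where :math:`s` is the size along that axis, so :math:`-1 \mapsto 0` and :math:`+1 \mapsto s - 1`; for example, with width 6 a normalized value of 1 gives :math:`(1 + 1) \cdot 5 / 2 = 5`.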
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 # MASKED: denormalize_pixel_coordinates3d function (lines 790-823)
def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
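The implementation above inverts the affine map x_norm = 2 * x / (size - 1) - 1 used by the normalize helpers. A minimal, self-contained round-trip sketch of that formula in 2D (the helper names normalize_xy/denormalize_xy and the test sizes are illustrative only, not part of the module above):

import torch


def normalize_xy(coords: torch.Tensor, height: int, width: int) -> torch.Tensor:
    # Same mapping as normalize_pixel_coordinates above: 2 * x / (size - 1) - 1.
    hw = torch.tensor([width, height], dtype=coords.dtype)
    return 2.0 / (hw - 1) * coords - 1


def denormalize_xy(coords: torch.Tensor, height: int, width: int) -> torch.Tensor:
    # Inverse mapping, as in the denormalize helpers: (x_norm + 1) * (size - 1) / 2.
    hw = torch.tensor([width, height], dtype=coords.dtype)
    return (hw - 1) / 2.0 * (coords + 1)


# Corner pixels land exactly on -1 and +1, and the round trip is exact.
pts = torch.tensor([[0.0, 0.0], [63.0, 31.0]])         # (x, y) pairs
norm = normalize_xy(pts, height=32, width=64)          # -> [[-1, -1], [1, 1]]
back = denormalize_xy(norm, height=32, width=64)
assert torch.allclose(back, pts)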
790
823
from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from kornia.constants import pi __all__ = [ # functional api "rad2deg", "deg2rad", "pol2cart", "cart2pol", "convert_points_from_homogeneous", "convert_points_to_homogeneous", "convert_affinematrix_to_homography", "convert_affinematrix_to_homography3d", "angle_axis_to_rotation_matrix", "angle_axis_to_quaternion", "rotation_matrix_to_angle_axis", "rotation_matrix_to_quaternion", "quaternion_to_angle_axis", "quaternion_to_rotation_matrix", "quaternion_log_to_exp", "quaternion_exp_to_log", "denormalize_pixel_coordinates", "normalize_pixel_coordinates", "normalize_quaternion", "denormalize_pixel_coordinates3d", "normalize_pixel_coordinates3d", ] def rad2deg(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from radians to degrees. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: Tensor with same shape as input. Example: >>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3) >>> output = rad2deg(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return 180. * tensor / pi.to(tensor.device).type(tensor.dtype) def deg2rad(tensor: torch.Tensor) -> torch.Tensor: r"""Function that converts angles from degrees to radians. Args: tensor (torch.Tensor): Tensor of arbitrary shape. Returns: torch.Tensor: tensor with same shape as input. Examples:: >>> input = 360. * torch.rand(1, 3, 3) >>> output = deg2rad(input) """ if not isinstance(tensor, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(tensor))) return tensor * pi.to(tensor.device).type(tensor.dtype) / 180. def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: r"""Function that converts polar coordinates to cartesian coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> rho = torch.rand(1, 3, 3) >>> phi = torch.rand(1, 3, 3) >>> x, y = pol2cart(rho, phi) """ if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(rho), type(phi))) x = rho * torch.cos(phi) y = rho * torch.sin(phi) return x, y def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]: """Function that converts cartesian coordinates to polar coordinates. Args: rho (torch.Tensor): Tensor of arbitrary shape. phi (torch.Tensor): Tensor of same arbitrary shape. eps (float): To avoid division by zero. Default is 1e-8 Returns: torch.Tensor, torch.Tensor: Tensor with same shape as input. Example: >>> x = torch.rand(1, 3, 3) >>> y = torch.rand(1, 3, 3) >>> rho, phi = cart2pol(x, y) """ if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)): raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format( type(x), type(y))) rho = torch.sqrt(x**2 + y**2 + eps) phi = torch.atan2(y, x) return rho, phi def convert_points_from_homogeneous( points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Function that converts points from homogeneous to Euclidean space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_from_homogeneous(input) # BxNx2 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) # we check for points at infinity z_vec: torch.Tensor = points[..., -1:] # set the results of division by zeror/near-zero to 1.0 # follow the convention of opencv: # https://github.com/opencv/opencv/pull/14411/files mask: torch.Tensor = torch.abs(z_vec) > eps scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_( mask, torch.tensor(1.0).to(points.device) / z_vec[mask]) return scale * points[..., :-1] def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor: r"""Function that converts points from Euclidean to homogeneous space. Examples:: >>> input = torch.rand(2, 4, 3) # BxNx3 >>> output = convert_points_to_homogeneous(input) # BxNx4 """ if not isinstance(points, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(points))) if len(points.shape) < 2: raise ValueError("Input must be at least a 2D tensor. Got {}".format( points.shape)) return torch.nn.functional.pad(points, [0, 1], "constant", 1.0) def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor: H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.) H[..., -1, -1] += 1.0 return H def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3]. Examples:: >>> input = torch.rand(2, 2, 3) # Bx2x3 >>> output = convert_affinematrix_to_homography(input) # Bx3x3 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)): raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor: r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4]. Examples:: >>> input = torch.rand(2, 3, 4) # Bx3x4 >>> output = convert_affinematrix_to_homography3d(input) # Bx4x4 """ if not isinstance(A, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(A))) if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)): raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}" .format(A.shape)) return _convert_affinematrix_to_homography_impl(A) def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix Args: angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations. Returns: torch.Tensor: tensor of 3x3 rotation matrices. Shape: - Input: :math:`(N, 3)` - Output: :math:`(N, 3, 3)` Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_axis_to_rotation_matrix(input) # Nx3x3 """ if not isinstance(angle_axis, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input size must be a (*, 3) tensor. Got {}".format( angle_axis.shape)) def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6): # We want to be careful to only evaluate the square root if the # norm of the angle_axis vector is greater than zero. Otherwise # we get a division by zero. 
k_one = 1.0 theta = torch.sqrt(theta2) wxyz = angle_axis / (theta + eps) wx, wy, wz = torch.chunk(wxyz, 3, dim=1) cos_theta = torch.cos(theta) sin_theta = torch.sin(theta) r00 = cos_theta + wx * wx * (k_one - cos_theta) r10 = wz * sin_theta + wx * wy * (k_one - cos_theta) r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta) r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta r11 = cos_theta + wy * wy * (k_one - cos_theta) r21 = wx * sin_theta + wy * wz * (k_one - cos_theta) r02 = wy * sin_theta + wx * wz * (k_one - cos_theta) r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta) r22 = cos_theta + wz * wz * (k_one - cos_theta) rotation_matrix = torch.cat( [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1) return rotation_matrix.view(-1, 3, 3) def _compute_rotation_matrix_taylor(angle_axis): rx, ry, rz = torch.chunk(angle_axis, 3, dim=1) k_one = torch.ones_like(rx) rotation_matrix = torch.cat( [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1) return rotation_matrix.view(-1, 3, 3) # stolen from ceres/rotation.h _angle_axis = torch.unsqueeze(angle_axis, dim=1) theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2)) theta2 = torch.squeeze(theta2, dim=1) # compute rotation matrices rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2) rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis) # create mask to handle both cases eps = 1e-6 mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device) mask_pos = (mask).type_as(theta2) mask_neg = (mask == False).type_as(theta2) # noqa # create output pose matrix batch_size = angle_axis.shape[0] rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis) rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1) # fill output matrix with masked values rotation_matrix[..., :3, :3] = \ mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor return rotation_matrix # Nx3x3 def rotation_matrix_to_angle_axis( rotation_matrix: torch.Tensor) -> torch.Tensor: r"""Convert 3x3 rotation matrix to Rodrigues vector. Args: rotation_matrix (torch.Tensor): rotation matrix. Returns: torch.Tensor: Rodrigues vector transformation. Shape: - Input: :math:`(N, 3, 3)` - Output: :math:`(N, 3)` Example: >>> input = torch.rand(2, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_angle_axis(input) # Nx3 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. Got {}".format( rotation_matrix.shape)) quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix) return quaternion_to_angle_axis(quaternion) def rotation_matrix_to_quaternion( rotation_matrix: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Convert 3x3 rotation matrix to 4d quaternion vector. The quaternion vector has components in (x, y, z, w) format. Args: rotation_matrix (torch.Tensor): the rotation matrix to convert. eps (float): small value to avoid zero division. Default: 1e-8. Return: torch.Tensor: the rotation in quaternion. Shape: - Input: :math:`(*, 3, 3)` - Output: :math:`(*, 4)` Example: >>> input = torch.rand(4, 3, 3) # Nx3x3 >>> output = rotation_matrix_to_quaternion(input) # Nx4 """ if not isinstance(rotation_matrix, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(rotation_matrix))) if not rotation_matrix.shape[-2:] == (3, 3): raise ValueError( "Input size must be a (*, 3, 3) tensor. 
Got {}".format( rotation_matrix.shape)) def safe_zero_division(numerator: torch.Tensor, denominator: torch.Tensor) -> torch.Tensor: eps: float = torch.finfo(numerator.dtype).tiny # type: ignore return numerator / torch.clamp(denominator, min=eps) rotation_matrix_vec: torch.Tensor = rotation_matrix.view( *rotation_matrix.shape[:-2], 9) m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk( rotation_matrix_vec, chunks=9, dim=-1) trace: torch.Tensor = m00 + m11 + m22 def trace_positive_cond(): sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw. qw = 0.25 * sq qx = safe_zero_division(m21 - m12, sq) qy = safe_zero_division(m02 - m20, sq) qz = safe_zero_division(m10 - m01, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_1(): sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx. qw = safe_zero_division(m21 - m12, sq) qx = 0.25 * sq qy = safe_zero_division(m01 + m10, sq) qz = safe_zero_division(m02 + m20, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_2(): sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy. qw = safe_zero_division(m02 - m20, sq) qx = safe_zero_division(m01 + m10, sq) qy = 0.25 * sq qz = safe_zero_division(m12 + m21, sq) return torch.cat([qx, qy, qz, qw], dim=-1) def cond_3(): sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz. qw = safe_zero_division(m10 - m01, sq) qx = safe_zero_division(m02 + m20, sq) qy = safe_zero_division(m12 + m21, sq) qz = 0.25 * sq return torch.cat([qx, qy, qz, qw], dim=-1) where_2 = torch.where(m11 > m22, cond_2(), cond_3()) where_1 = torch.where( (m00 > m11) & (m00 > m22), cond_1(), where_2) quaternion: torch.Tensor = torch.where( trace > 0., trace_positive_cond(), where_1) return quaternion def normalize_quaternion(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor: r"""Normalizes a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be normalized. The tensor can be of shape :math:`(*, 4)`. eps (Optional[bool]): small value to avoid division by zero. Default: 1e-12. Return: torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([1., 0., 1., 0.]) >>> normalize_quaternion(quaternion) tensor([0.7071, 0.0000, 0.7071, 0.0000]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) return F.normalize(quaternion, p=2, dim=-1, eps=eps) # based on: # https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101 # https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247 def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor: r"""Converts a quaternion to a rotation matrix. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 1., 0.]) >>> quaternion_to_rotation_matrix(quaternion) tensor([[-1., 0., 0.], [ 0., -1., 0.], [ 0., 0., 1.]]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. 
Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # normalize the input quaternion quaternion_norm: torch.Tensor = normalize_quaternion(quaternion) # unpack the normalized quaternion components x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1) # compute the actual conversion tx: torch.Tensor = 2.0 * x ty: torch.Tensor = 2.0 * y tz: torch.Tensor = 2.0 * z twx: torch.Tensor = tx * w twy: torch.Tensor = ty * w twz: torch.Tensor = tz * w txx: torch.Tensor = tx * x txy: torch.Tensor = ty * x txz: torch.Tensor = tz * x tyy: torch.Tensor = ty * y tyz: torch.Tensor = tz * y tzz: torch.Tensor = tz * z one: torch.Tensor = torch.tensor(1.) matrix: torch.Tensor = torch.stack([ one - (tyy + tzz), txy - twz, txz + twy, txy + twz, one - (txx + tzz), tyz - twx, txz - twy, tyz + twx, one - (txx + tyy) ], dim=-1).view(-1, 3, 3) if len(quaternion.shape) == 1: matrix = torch.squeeze(matrix, dim=0) return matrix def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor: """Convert quaternion vector to angle axis of rotation. The quaternion should be in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion (torch.Tensor): tensor with quaternions. Return: torch.Tensor: tensor with angle axis of rotation. Shape: - Input: :math:`(*, 4)` where `*` means, any number of dimensions - Output: :math:`(*, 3)` Example: >>> quaternion = torch.rand(2, 4) # Nx4 >>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3 """ if not torch.is_tensor(quaternion): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape Nx4 or 4. Got {}".format( quaternion.shape)) # unpack input and compute conversion q1: torch.Tensor = quaternion[..., 1] q2: torch.Tensor = quaternion[..., 2] q3: torch.Tensor = quaternion[..., 3] sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta) cos_theta: torch.Tensor = quaternion[..., 0] two_theta: torch.Tensor = 2.0 * torch.where( cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: torch.Tensor = two_theta / sin_theta k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta) k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg) angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3] angle_axis[..., 0] += q1 * k angle_axis[..., 1] += q2 * k angle_axis[..., 2] += q3 * k return angle_axis def quaternion_log_to_exp(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies exponential map to log quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 3)`. Return: torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`. Example: >>> quaternion = torch.tensor([0., 0., 0.]) >>> quaternion_log_to_exp(quaternion) tensor([0., 0., 0., 1.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape (*, 3). 
Got {}".format( quaternion.shape)) # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps) # compute scalar and vector quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q quaternion_scalar: torch.Tensor = torch.cos(norm_q) # compose quaternion and return quaternion_exp: torch.Tensor = torch.cat( [quaternion_vector, quaternion_scalar], dim=-1) return quaternion_exp def quaternion_exp_to_log(quaternion: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: r"""Applies the log map to a quaternion. The quaternion should be in (x, y, z, w) format. Args: quaternion (torch.Tensor): a tensor containing a quaternion to be converted. The tensor can be of shape :math:`(*, 4)`. Return: torch.Tensor: the quaternion log map of shape :math:`(*, 3)`. Example: >>> quaternion = torch.tensor([0., 0., 0., 1.]) >>> quaternion_exp_to_log(quaternion) tensor([0., 0., 0.]) """ if not isinstance(quaternion, torch.Tensor): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(quaternion))) if not quaternion.shape[-1] == 4: raise ValueError( "Input must be a tensor of shape (*, 4). Got {}".format( quaternion.shape)) # unpack quaternion vector and scalar quaternion_vector: torch.Tensor = quaternion[..., 0:3] quaternion_scalar: torch.Tensor = quaternion[..., 3:4] # compute quaternion norm norm_q: torch.Tensor = torch.norm( quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps) # apply log map quaternion_log: torch.Tensor = quaternion_vector * torch.acos( torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q return quaternion_log # based on: # https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138 def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor: r"""Convert an angle axis to a quaternion. The quaternion vector has components in (x, y, z, w) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: angle_axis (torch.Tensor): tensor with angle axis. Return: torch.Tensor: tensor with quaternion. Shape: - Input: :math:`(*, 3)` where `*` means, any number of dimensions - Output: :math:`(*, 4)` Example: >>> angle_axis = torch.rand(2, 3) # Nx3 >>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4 """ if not torch.is_tensor(angle_axis): raise TypeError("Input type is not a torch.Tensor. Got {}".format( type(angle_axis))) if not angle_axis.shape[-1] == 3: raise ValueError( "Input must be a tensor of shape Nx3 or 3. 
Got {}".format( angle_axis.shape)) # unpack input and compute conversion a0: torch.Tensor = angle_axis[..., 0:1] a1: torch.Tensor = angle_axis[..., 1:2] a2: torch.Tensor = angle_axis[..., 2:3] theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2 theta: torch.Tensor = torch.sqrt(theta_squared) half_theta: torch.Tensor = theta * 0.5 mask: torch.Tensor = theta_squared > 0.0 ones: torch.Tensor = torch.ones_like(half_theta) k_neg: torch.Tensor = 0.5 * ones k_pos: torch.Tensor = torch.sin(half_theta) / theta k: torch.Tensor = torch.where(mask, k_pos, k_neg) w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones) quaternion: torch.Tensor = torch.zeros_like(angle_axis) quaternion[..., 0:1] += a0 * k quaternion[..., 1:2] += a1 * k quaternion[..., 2:3] += a2 * k return torch.cat([w, quaternion], dim=-1) # based on: # https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71 def normalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype), torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) ]) factor: torch.Tensor = torch.tensor( 2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates( pixel_coordinates: torch.Tensor, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 2)`. width (int): the maximum width in the x-axis. height (int): the maximum height in the y-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 2: raise ValueError("Input pixel_coordinates must be of shape (*, 2). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor hw: torch.Tensor = torch.stack([ torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1) def normalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Normalize pixel coordinates between -1 and 1. Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the grid with pixel coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the z-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. 
(default 1e-8). Return: torch.Tensor: the normalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return factor * pixel_coordinates - 1 def denormalize_pixel_coordinates3d( pixel_coordinates: torch.Tensor, depth: int, height: int, width: int, eps: float = 1e-8) -> torch.Tensor: r"""Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates (torch.Tensor): the normalized grid coordinates. Shape can be :math:`(*, 3)`. depth (int): the maximum depth in the x-axis. height (int): the maximum height in the y-axis. width (int): the maximum width in the x-axis. eps (float): safe division by zero. (default 1e-8). Return: torch.Tensor: the denormalized pixel coordinates. """ if pixel_coordinates.shape[-1] != 3: raise ValueError("Input pixel_coordinates must be of shape (*, 3). " "Got {}".format(pixel_coordinates.shape)) # compute normalization factor dhw: torch.Tensor = torch.stack([ torch.tensor(depth), torch.tensor(width), torch.tensor(height) ]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps) return torch.tensor(1.) / factor * (pixel_coordinates + 1)
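The quaternion exp and log maps defined in the file above are mutually inverse for rotation vectors with norm below pi. A small round-trip check, assuming the module above installs as kornia.geometry.conversions (the import path is an assumption; it is not stated in the snippet):

import torch

from kornia.geometry.conversions import (
    quaternion_exp_to_log,
    quaternion_log_to_exp,
)

# A small log-domain rotation vector (axis times half the rotation angle).
v = torch.tensor([0.1, -0.2, 0.3])

q = quaternion_log_to_exp(v)        # unit quaternion in (x, y, z, w) format
v_back = quaternion_exp_to_log(q)   # recovers the original log vector

assert torch.allclose(v, v_back, atol=1e-6)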
get
Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._inputs import * __all__ = ['SqlPoolsV3Args', 'SqlPoolsV3'] @pulumi.input_type class SqlPoolsV3Args: def __init__(__self__, *, resource_group_name: pulumi.Input[str], workspace_name: pulumi.Input[str], location: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input['SkuArgs']] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ The set of arguments for constructing a SqlPoolsV3 resource. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[str] workspace_name: The name of the workspace. :param pulumi.Input[str] location: The geo-location where the resource lives :param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer. :param pulumi.Input[str] sql_pool_name: The name of the sql pool. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. """ pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "workspace_name", workspace_name) if location is not None: pulumi.set(__self__, "location", location) if sku is not None: pulumi.set(__self__, "sku", sku) if sql_pool_name is not None: pulumi.set(__self__, "sql_pool_name", sql_pool_name) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. The name is case insensitive. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="workspaceName") def workspace_name(self) -> pulumi.Input[str]: """ The name of the workspace. """ return pulumi.get(self, "workspace_name") @workspace_name.setter def workspace_name(self, value: pulumi.Input[str]): pulumi.set(self, "workspace_name", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ The geo-location where the resource lives """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def sku(self) -> Optional[pulumi.Input['SkuArgs']]: """ The sql pool SKU. The list of SKUs may vary by region and support offer. """ return pulumi.get(self, "sku") @sku.setter def sku(self, value: Optional[pulumi.Input['SkuArgs']]): pulumi.set(self, "sku", value) @property @pulumi.getter(name="sqlPoolName") def sql_pool_name(self) -> Optional[pulumi.Input[str]]: """ The name of the sql pool. """ return pulumi.get(self, "sql_pool_name") @sql_pool_name.setter def sql_pool_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sql_pool_name", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags. 
""" return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) class SqlPoolsV3(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, workspace_name: Optional[pulumi.Input[str]] = None, __props__=None): """ A sql pool resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] location: The geo-location where the resource lives :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sql pool SKU. The list of SKUs may vary by region and support offer. :param pulumi.Input[str] sql_pool_name: The name of the sql pool. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[str] workspace_name: The name of the workspace. """ ... @overload def __init__(__self__, resource_name: str, args: SqlPoolsV3Args, opts: Optional[pulumi.ResourceOptions] = None): """ A sql pool resource. :param str resource_name: The name of the resource. :param SqlPoolsV3Args args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(SqlPoolsV3Args, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, workspace_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args) __props__.__dict__["location"] = location if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["sku"] = sku __props__.__dict__["sql_pool_name"] = sql_pool_name __props__.__dict__["tags"] = tags if workspace_name is None and not opts.urn: raise TypeError("Missing required property 'workspace_name'") __props__.__dict__["workspace_name"] = workspace_name __props__.__dict__["current_service_objective_name"] = None __props__.__dict__["kind"] = None __props__.__dict__["name"] 
= None __props__.__dict__["requested_service_objective_name"] = None __props__.__dict__["sql_pool_guid"] = None __props__.__dict__["status"] = None __props__.__dict__["system_data"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20200401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:SqlPoolsV3")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(SqlPoolsV3, __self__).__init__( 'azure-native:synapse/v20200401preview:SqlPoolsV3', resource_name, __props__, opts) # MASKED: get function (lines 209-236) @property @pulumi.getter(name="currentServiceObjectiveName") def current_service_objective_name(self) -> pulumi.Output[str]: """ The current service level objective name of the sql pool. """ return pulumi.get(self, "current_service_objective_name") @property @pulumi.getter def kind(self) -> pulumi.Output[str]: """ Kind of SqlPool. """ return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ The geo-location where the resource lives """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="requestedServiceObjectiveName") def requested_service_objective_name(self) -> pulumi.Output[str]: """ The requested service level objective name of the sql pool. """ return pulumi.get(self, "requested_service_objective_name") @property @pulumi.getter def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]: """ The sql pool SKU. The list of SKUs may vary by region and support offer. """ return pulumi.get(self, "sku") @property @pulumi.getter(name="sqlPoolGuid") def sql_pool_guid(self) -> pulumi.Output[str]: """ The Guid of the sql pool. """ return pulumi.get(self, "sql_pool_guid") @property @pulumi.getter def status(self) -> pulumi.Output[str]: """ The status of the sql pool. """ return pulumi.get(self, "status") @property @pulumi.getter(name="systemData") def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']: """ SystemData of SqlPool. """ return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3': """ Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args) __props__.__dict__["current_service_objective_name"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["requested_service_objective_name"] = None __props__.__dict__["sku"] = None __props__.__dict__["sql_pool_guid"] = None __props__.__dict__["status"] = None __props__.__dict__["system_data"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None return SqlPoolsV3(resource_name, opts=opts, __props__=__props__)
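The static get above only rehydrates output properties from the provider by id; it takes no inputs beyond the lookup id. A usage sketch, assuming the generated package layout pulumi_azure_native.synapse.v20200401preview (the resource id and all names below are placeholders):

import pulumi
from pulumi_azure_native.synapse.v20200401preview import SqlPoolsV3

# Placeholder ARM id of an existing pool; subscription, group, workspace and
# pool names are illustrative only.
existing_id = (
    "/subscriptions/00000000-0000-0000-0000-000000000000"
    "/resourceGroups/example-rg/providers/Microsoft.Synapse"
    "/workspaces/example-ws/sqlPoolsV3/example-pool"
)

# All state comes back as Pulumi outputs; nothing is resolved until the
# program runs under `pulumi preview` or `pulumi up`.
pool = SqlPoolsV3.get("existing-pool", id=existing_id)

pulumi.export("sqlPoolStatus", pool.status)
pulumi.export("sqlPoolSku", pool.sku)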
209
236
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._inputs import * __all__ = ['SqlPoolsV3Args', 'SqlPoolsV3'] @pulumi.input_type class SqlPoolsV3Args: def __init__(__self__, *, resource_group_name: pulumi.Input[str], workspace_name: pulumi.Input[str], location: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input['SkuArgs']] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ The set of arguments for constructing a SqlPoolsV3 resource. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[str] workspace_name: The name of the workspace. :param pulumi.Input[str] location: The geo-location where the resource lives :param pulumi.Input['SkuArgs'] sku: The sql pool SKU. The list of SKUs may vary by region and support offer. :param pulumi.Input[str] sql_pool_name: The name of the sql pool. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. """ pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "workspace_name", workspace_name) if location is not None: pulumi.set(__self__, "location", location) if sku is not None: pulumi.set(__self__, "sku", sku) if sql_pool_name is not None: pulumi.set(__self__, "sql_pool_name", sql_pool_name) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. The name is case insensitive. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="workspaceName") def workspace_name(self) -> pulumi.Input[str]: """ The name of the workspace. """ return pulumi.get(self, "workspace_name") @workspace_name.setter def workspace_name(self, value: pulumi.Input[str]): pulumi.set(self, "workspace_name", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ The geo-location where the resource lives """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def sku(self) -> Optional[pulumi.Input['SkuArgs']]: """ The sql pool SKU. The list of SKUs may vary by region and support offer. """ return pulumi.get(self, "sku") @sku.setter def sku(self, value: Optional[pulumi.Input['SkuArgs']]): pulumi.set(self, "sku", value) @property @pulumi.getter(name="sqlPoolName") def sql_pool_name(self) -> Optional[pulumi.Input[str]]: """ The name of the sql pool. """ return pulumi.get(self, "sql_pool_name") @sql_pool_name.setter def sql_pool_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sql_pool_name", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags. 
""" return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) class SqlPoolsV3(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, workspace_name: Optional[pulumi.Input[str]] = None, __props__=None): """ A sql pool resource. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] location: The geo-location where the resource lives :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sql pool SKU. The list of SKUs may vary by region and support offer. :param pulumi.Input[str] sql_pool_name: The name of the sql pool. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags. :param pulumi.Input[str] workspace_name: The name of the workspace. """ ... @overload def __init__(__self__, resource_name: str, args: SqlPoolsV3Args, opts: Optional[pulumi.ResourceOptions] = None): """ A sql pool resource. :param str resource_name: The name of the resource. :param SqlPoolsV3Args args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(SqlPoolsV3Args, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, sql_pool_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, workspace_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args) __props__.__dict__["location"] = location if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["sku"] = sku __props__.__dict__["sql_pool_name"] = sql_pool_name __props__.__dict__["tags"] = tags if workspace_name is None and not opts.urn: raise TypeError("Missing required property 'workspace_name'") __props__.__dict__["workspace_name"] = workspace_name __props__.__dict__["current_service_objective_name"] = None __props__.__dict__["kind"] = None __props__.__dict__["name"] 
= None __props__.__dict__["requested_service_objective_name"] = None __props__.__dict__["sql_pool_guid"] = None __props__.__dict__["status"] = None __props__.__dict__["system_data"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse/v20200401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20190601preview:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210301:SqlPoolsV3"), pulumi.Alias(type_="azure-native:synapse/v20210401preview:SqlPoolsV3"), pulumi.Alias(type_="azure-nextgen:synapse/v20210401preview:SqlPoolsV3")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(SqlPoolsV3, __self__).__init__( 'azure-native:synapse/v20200401preview:SqlPoolsV3', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'SqlPoolsV3': """ Get an existing SqlPoolsV3 resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = SqlPoolsV3Args.__new__(SqlPoolsV3Args) __props__.__dict__["current_service_objective_name"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] = None __props__.__dict__["name"] = None __props__.__dict__["requested_service_objective_name"] = None __props__.__dict__["sku"] = None __props__.__dict__["sql_pool_guid"] = None __props__.__dict__["status"] = None __props__.__dict__["system_data"] = None __props__.__dict__["tags"] = None __props__.__dict__["type"] = None return SqlPoolsV3(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="currentServiceObjectiveName") def current_service_objective_name(self) -> pulumi.Output[str]: """ The current service level objective name of the sql pool. """ return pulumi.get(self, "current_service_objective_name") @property @pulumi.getter def kind(self) -> pulumi.Output[str]: """ Kind of SqlPool. """ return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ The geo-location where the resource lives """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="requestedServiceObjectiveName") def requested_service_objective_name(self) -> pulumi.Output[str]: """ The requested service level objective name of the sql pool. """ return pulumi.get(self, "requested_service_objective_name") @property @pulumi.getter def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]: """ The sql pool SKU. The list of SKUs may vary by region and support offer. """ return pulumi.get(self, "sku") @property @pulumi.getter(name="sqlPoolGuid") def sql_pool_guid(self) -> pulumi.Output[str]: """ The Guid of the sql pool. 
""" return pulumi.get(self, "sql_pool_guid") @property @pulumi.getter def status(self) -> pulumi.Output[str]: """ The status of the sql pool. """ return pulumi.get(self, "status") @property @pulumi.getter(name="systemData") def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']: """ SystemData of SqlPool. """ return pulumi.get(self, "system_data") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags. """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
act
Returns actions for each agent in this list. Args: agents: A list of agent objects. obs: A list of matching observations per agent. action_space: The action space for the environment using this model. is_communicative: Whether the action depends on communication observations as well. Returns a list of actions.
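The body of act is masked in the sample below, so only this docstring describes it. A hypothetical per-agent dispatch loop consistent with that description (this is not the masked implementation; how communicative actions are represented is an assumption):

def act_sketch(agents, obs, action_space, is_communicative=False):
    # Hypothetical: query each agent with its own observation and collect the
    # results; communicative variants may return a tuple such as
    # (move, message_1, message_2), which is kept as a list here.
    actions = []
    for agent, agent_obs in zip(agents, obs):
        action = agent.act(agent_obs, action_space=action_space)
        if is_communicative and isinstance(action, (list, tuple)):
            actions.append(list(action))
        else:
            actions.append(action)
    return actions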
'''Module to manage and advanced game state''' from collections import defaultdict import numpy as np from . import constants from . import characters from . import utility class ForwardModel(object): """Class for helping with the [forward] modeling of the game state.""" def run(self, num_times, board, agents, bombs, items, flames, is_partially_observable, agent_view_size, action_space, training_agent=None, is_communicative=False): """Run the forward model. Args: num_times: The number of times to run it for. This is a maximum and it will stop early if we reach a done. board: The board state to run it from. agents: The agents to use to run it. bombs: The starting bombs. items: The starting items. flames: The starting flames. is_partially_observable: Whether the board is partially observable or not. Only applies to TeamRadio. agent_view_size: If it's partially observable, then the size of the square that the agent can view. action_space: The actions that each agent can take. training_agent: The training agent to pass to done. is_communicative: Whether the action depends on communication observations as well. Returns: steps: The list of step results, which are each a dict of "obs", "next_obs", "reward", "action". board: Updated board. agents: Updated agents, same models though. bombs: Updated bombs. items: Updated items. flames: Updated flames. done: Whether we completed the game in these steps. info: The result of the game if it's completed. """ steps = [] for _ in num_times: obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) actions = self.act( agents, obs, action_space, is_communicative=is_communicative) board, agents, bombs, items, flames = self.step( actions, board, agents, bombs, items, flames) next_obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) reward = self.get_rewards(agents, game_type, step_count, max_steps) done = self.get_done(agents, game_type, step_count, max_steps, training_agent) info = self.get_info(done, rewards, game_type, agents) steps.append({ "obs": obs, "next_obs": next_obs, "reward": reward, "actions": actions, }) if done: # Callback to let the agents know that the game has ended. for agent in agents: agent.episode_end(reward[agent.agent_id]) break return steps, board, agents, bombs, items, flames, done, info # MASKED: act function (lines 84-123) @staticmethod def step(actions, curr_board, curr_agents, curr_bombs, curr_items, curr_flames, max_blast_strength=10): board_size = len(curr_board) # Tick the flames. Replace any dead ones with passages. If there is an # item there, then reveal that item. flames = [] for flame in curr_flames: position = flame.position if flame.is_dead(): item_value = curr_items.get(position) if item_value: del curr_items[position] else: item_value = constants.Item.Passage.value curr_board[position] = item_value else: flame.tick() flames.append(flame) curr_flames = flames # Redraw all current flames # Multiple flames may share a position and the map should contain # a flame until all flames are dead to avoid issues with bomb # movements and explosions. for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # Step the living agents and moving bombs. # If two agents try to go to the same spot, they should bounce back to # their previous spots. This is complicated with one example being when # there are three agents all in a row. 
If the one in the middle tries # to go to the left and bounces with the one on the left, and then the # one on the right tried to go to the middle one's position, she should # also bounce. A way of doing this is to gather all the new positions # before taking any actions. Then, if there are disputes, correct those # disputes iteratively. # Additionally, if two agents try to switch spots by moving into each # Figure out desired next position for alive agents alive_agents = [agent for agent in curr_agents if agent.is_alive] desired_agent_positions = [agent.position for agent in alive_agents] for num_agent, agent in enumerate(alive_agents): position = agent.position # We change the curr_board here as a safeguard. We will later # update the agent's new position. curr_board[position] = constants.Item.Passage.value action = actions[agent.agent_id] if action == constants.Action.Stop.value: pass elif action == constants.Action.Bomb.value: position = agent.position if not utility.position_is_bomb(curr_bombs, position): bomb = agent.maybe_lay_bomb() if bomb: curr_bombs.append(bomb) elif utility.is_valid_direction(curr_board, position, action): desired_agent_positions[num_agent] = agent.get_next_position( action) # Gather desired next positions for moving bombs. Handle kicks later. desired_bomb_positions = [bomb.position for bomb in curr_bombs] for num_bomb, bomb in enumerate(curr_bombs): curr_board[bomb.position] = constants.Item.Passage.value if bomb.is_moving(): desired_position = utility.get_next_position( bomb.position, bomb.moving_direction) if utility.position_on_board(curr_board, desired_position) \ and not utility.position_is_powerup(curr_board, desired_position) \ and not utility.position_is_wall(curr_board, desired_position): desired_bomb_positions[num_bomb] = desired_position # Position switches: # Agent <-> Agent => revert both to previous position. # Bomb <-> Bomb => revert both to previous position. # Agent <-> Bomb => revert Bomb to previous position. crossings = {} def crossing(current, desired): '''Checks to see if an agent is crossing paths''' current_x, current_y = current desired_x, desired_y = desired if current_x != desired_x: assert current_y == desired_y return ('X', min(current_x, desired_x), current_y) assert current_x == desired_x return ('Y', current_x, min(current_y, desired_y)) for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: desired_position = desired_agent_positions[num_agent] border = crossing(agent.position, desired_position) if border in crossings: # Crossed another agent - revert both to prior positions. desired_agent_positions[num_agent] = agent.position num_agent2, _ = crossings[border] desired_agent_positions[num_agent2] = alive_agents[ num_agent2].position else: crossings[border] = (num_agent, True) for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] != bomb.position: desired_position = desired_bomb_positions[num_bomb] border = crossing(bomb.position, desired_position) if border in crossings: # Crossed - revert to prior position. desired_bomb_positions[num_bomb] = bomb.position num, is_agent = crossings[border] if not is_agent: # Crossed bomb - revert that to prior position as well. desired_bomb_positions[num] = curr_bombs[num].position else: crossings[border] = (num_bomb, False) # Deal with multiple agents or multiple bomb collisions on desired next # position by resetting desired position to current position for # everyone involved in the collision. 
agent_occupancy = defaultdict(int) bomb_occupancy = defaultdict(int) for desired_position in desired_agent_positions: agent_occupancy[desired_position] += 1 for desired_position in desired_bomb_positions: bomb_occupancy[desired_position] += 1 # Resolve >=2 agents or >=2 bombs trying to occupy the same space. change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Either another agent is going to this position or more than # one bomb is going to this position. In both scenarios, revert # to the original position. if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1): desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position if desired_position != curr_position and \ (bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1): desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 change = True # Handle kicks. agent_indexed_by_kicked_bomb = {} kicked_bomb_indexed_by_agent = {} delayed_bomb_updates = [] delayed_agent_updates = [] # Loop through all bombs to see if they need a good kicking or cause # collisions with an agent. for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] if agent_occupancy[desired_position] == 0: # There was never an agent around to kick or collide. continue agent_list = [ (num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \ if desired_position == desired_agent_positions[num_agent]] if not agent_list: # Agents moved from collision. continue # The agent_list should contain a single element at this point. assert (len(agent_list) == 1) num_agent, agent = agent_list[0] if desired_position == agent.position: # Agent did not move if desired_position != bomb.position: # Bomb moved, but agent did not. The bomb should revert # and stop. delayed_bomb_updates.append((num_bomb, bomb.position)) continue # NOTE: At this point, we have that the agent in question tried to # move into this position. if not agent.can_kick: # If we move the agent at this point, then we risk having two # agents on a square in future iterations of the loop. So we # push this change to the next stage instead. delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) continue # Agent moved and can kick - see if the target for the kick never had anyhing on it direction = constants.Action(actions[agent.agent_id]) target_position = utility.get_next_position(desired_position, direction) if utility.position_on_board(curr_board, target_position) and \ agent_occupancy[target_position] == 0 and \ bomb_occupancy[target_position] == 0 and \ not utility.position_is_powerup(curr_board, target_position) and \ not utility.position_is_wall(curr_board, target_position): # Ok to update bomb desired location as we won't iterate over it again here # but we can not update bomb_occupancy on target position and need to check it again # However we need to set the bomb count on the current position to zero so # that the agent can stay on this position. 
bomb_occupancy[desired_position] = 0 delayed_bomb_updates.append((num_bomb, target_position)) agent_indexed_by_kicked_bomb[num_bomb] = num_agent kicked_bomb_indexed_by_agent[num_agent] = num_bomb bomb.moving_direction = direction # Bombs may still collide and we then need to reverse bomb and agent .. else: delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) for (num_bomb, bomb_position) in delayed_bomb_updates: desired_bomb_positions[num_bomb] = bomb_position bomb_occupancy[bomb_position] += 1 change = True for (num_agent, agent_position) in delayed_agent_updates: desired_agent_positions[num_agent] = agent_position agent_occupancy[agent_position] += 1 change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0): # Late collisions resulting from failed kicks force this agent to stay at the # original position. Check if this agent successfully kicked a bomb above and undo # the kick. if num_agent in kicked_bomb_indexed_by_agent: num_bomb = kicked_bomb_indexed_by_agent[num_agent] bomb = curr_bombs[num_bomb] desired_bomb_positions[num_bomb] = bomb.position bomb_occupancy[bomb.position] += 1 del agent_indexed_by_kicked_bomb[num_bomb] del kicked_bomb_indexed_by_agent[num_agent] desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position # This bomb may be a boomerang, i.e. it was kicked back to the # original location it moved from. If it is blocked now, it # can't be kicked and the agent needs to move back to stay # consistent with other movements. if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb: continue bomb_occupancy_ = bomb_occupancy[desired_position] agent_occupancy_ = agent_occupancy[desired_position] # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if bomb_occupancy_ > 1 or agent_occupancy_ != 0: desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 num_agent = agent_indexed_by_kicked_bomb.get(num_bomb) if num_agent is not None: agent = alive_agents[num_agent] desired_agent_positions[num_agent] = agent.position agent_occupancy[agent.position] += 1 del kicked_bomb_indexed_by_agent[num_agent] del agent_indexed_by_kicked_bomb[num_bomb] change = True for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] == bomb.position and \ not num_bomb in agent_indexed_by_kicked_bomb: # Bomb was not kicked this turn and its desired position is its # current location. Stop it just in case it was moving before. bomb.stop() else: # Move bomb to the new position. # NOTE: We already set the moving direction up above. 
bomb.position = desired_bomb_positions[num_bomb] for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: agent.move(actions[agent.agent_id]) if utility.position_is_powerup(curr_board, agent.position): agent.pick_up( constants.Item(curr_board[agent.position]), max_blast_strength=max_blast_strength) # Explode bombs. exploded_map = np.zeros_like(curr_board) has_new_explosions = False for bomb in curr_bombs: bomb.tick() if bomb.exploded(): has_new_explosions = True elif curr_board[bomb.position] == constants.Item.Flames.value: bomb.fire() has_new_explosions = True # Chain the explosions. while has_new_explosions: next_bombs = [] has_new_explosions = False for bomb in curr_bombs: if not bomb.exploded(): next_bombs.append(bomb) continue bomb.bomber.incr_ammo() for _, indices in bomb.explode().items(): for r, c in indices: if not all( [r >= 0, c >= 0, r < board_size, c < board_size]): break if curr_board[r][c] == constants.Item.Rigid.value: break exploded_map[r][c] = 1 if curr_board[r][c] == constants.Item.Wood.value: break curr_bombs = next_bombs for bomb in curr_bombs: if bomb.in_range(exploded_map): bomb.fire() has_new_explosions = True # Update the board's bombs. for bomb in curr_bombs: curr_board[bomb.position] = constants.Item.Bomb.value # Update the board's flames. flame_positions = np.where(exploded_map == 1) for row, col in zip(flame_positions[0], flame_positions[1]): curr_flames.append(characters.Flame((row, col))) for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # Kill agents on flames. Otherwise, update position on curr_board. for agent in alive_agents: if curr_board[agent.position] == constants.Item.Flames.value: agent.die() else: curr_board[agent.position] = utility.agent_value(agent.agent_id) return curr_board, curr_agents, curr_bombs, curr_items, curr_flames def get_observations(self, curr_board, agents, bombs, is_partially_observable, agent_view_size, game_type, game_env): """Gets the observations as an np.array of the visible squares. The agent gets to choose whether it wants to keep the fogged part in memory. 
""" board_size = len(curr_board) def make_bomb_maps(position): ''' Makes an array of an agents bombs and the bombs attributes ''' blast_strengths = np.zeros((board_size, board_size)) life = np.zeros((board_size, board_size)) for bomb in bombs: x, y = bomb.position if not is_partially_observable \ or in_view_range(position, x, y): blast_strengths[(x, y)] = bomb.blast_strength life[(x, y)] = bomb.life return blast_strengths, life def in_view_range(position, v_row, v_col): '''Checks to see if a tile is in an agents viewing area''' row, col = position return all([ row >= v_row - agent_view_size, row <= v_row + agent_view_size, col >= v_col - agent_view_size, col <= v_col + agent_view_size ]) attrs = [ 'position', 'blast_strength', 'can_kick', 'teammate', 'ammo', 'enemies' ] alive_agents = [ utility.agent_value(agent.agent_id) for agent in agents if agent.is_alive ] observations = [] for agent in agents: agent_obs = {'alive': alive_agents} board = curr_board if is_partially_observable: board = board.copy() for row in range(board_size): for col in range(board_size): if not in_view_range(agent.position, row, col): board[row, col] = constants.Item.Fog.value agent_obs['board'] = board bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position) agent_obs['bomb_blast_strength'] = bomb_blast_strengths agent_obs['bomb_life'] = bomb_life agent_obs['game_type'] = game_type.value agent_obs['game_env'] = game_env for attr in attrs: assert hasattr(agent, attr) agent_obs[attr] = getattr(agent, attr) observations.append(agent_obs) return observations @staticmethod def get_done(agents, step_count, max_steps, game_type, training_agent): # print('get_done called...', training_agent) alive = [agent for agent in agents if agent.is_alive] alive_ids = sorted([agent.agent_id for agent in alive]) if step_count >= max_steps: print('gameover : max timestep over') return True elif game_type == constants.GameType.FFA: if training_agent is not None and training_agent not in alive_ids: print('gameover : ffa training_agent has died') return True if len(alive) <= 1: print('checkout : ffa only %s player survived' % len(alive)) return len(alive) <= 1 elif len(alive_ids) <= 1: print('gameover : only one player survived') return True elif alive_ids == [0, 2]: print('gameover : [0,2] team won') return True elif any([ alive_ids == [1, 3] ]): print('gameover : [1,3] team won') return True return False @staticmethod def get_info(done, rewards, game_type, agents): if game_type == constants.GameType.FFA: alive = [agent for agent in agents if agent.is_alive] if done: if len(alive) != 1: # Either we have more than 1 alive (reached max steps) or # we have 0 alive (last agents died at the same time). return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1] } else: return { 'result': constants.Result.Incomplete, } elif done: # We are playing a team game. 
if rewards == [-1] * 4: return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1], } else: return { 'result': constants.Result.Incomplete, } @staticmethod def get_rewards(agents, game_type, step_count, max_steps): print('get_rewards called..', self.training_agent) def any_lst_equal(lst, values): '''Checks if list are equal''' return any([lst == v for v in values]) alive_agents = [num for num, agent in enumerate(agents) \ if agent.is_alive] if game_type == constants.GameType.FFA: if len(alive_agents) == 1: # An agent won. Give them +1, others -1. return [2 * int(agent.is_alive) - 1 for agent in agents] elif step_count >= max_steps: # Game is over from time. Everyone gets -1. return [-1] * 4 else: # Game running: 0 for alive, -1 for dead. return [int(agent.is_alive) - 1 for agent in agents] else: # We are playing a team game. if any_lst_equal(alive_agents, [[0, 2], [0], [2]]): # Team [0, 2] wins. return [1, -1, 1, -1] elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]): # Team [1, 3] wins. return [-1, 1, -1, 1] elif step_count >= max_steps: # Game is over by max_steps. All agents tie. return [-1] * 4 elif len(alive_agents) == 0: # Everyone's dead. All agents tie. return [-1] * 4 else: # No team has yet won or lost. return [0] * 4
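The position-switch handling in the `step` code above keys every one-cell move on the border it crosses, so two agents (or bombs) moving through the same border from opposite sides map to the same dictionary key and can both be reverted. A minimal self-contained sketch of that idea; the names, positions, and the `border_key` helper are invented purely for illustration and are not taken from the repository:

# Minimal sketch of the border-key trick used by `crossing` in `step`.
# Two moves that cross the same cell border in opposite directions produce
# the same key, so both movers can be bounced back to their prior squares.
# All names and positions here are illustrative, not from the repository.

def border_key(current, desired):
    '''Key identifying the border crossed by a one-cell move.'''
    cx, cy = current
    dx, dy = desired
    if cx != dx:                      # move along rows
        assert cy == dy
        return ('X', min(cx, dx), cy)
    assert cx == dx                   # move along columns
    return ('Y', cx, min(cy, dy))

moves = {'agent_a': ((3, 4), (3, 5)),   # tries to move one column right
         'agent_b': ((3, 5), (3, 4))}   # tries to move one column left

seen = {}
reverted = set()
for name, (cur, des) in moves.items():
    key = border_key(cur, des)
    if key in seen:                   # the border was already claimed
        reverted.update({name, seen[key]})
    else:
        seen[key] = name

print(sorted(reverted))               # ['agent_a', 'agent_b'] - both bounce back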
@staticmethod def act(agents, obs, action_space, is_communicative=False): """Returns actions for each agent in this list. Args: agents: A list of agent objects. obs: A list of matching observations per agent. action_space: The action space for the environment using this model. is_communicative: Whether the action depends on communication observations as well. Returns a list of actions. """ def act_ex_communication(agent): '''Handles agent's move without communication''' if agent.is_alive: return agent.act(obs[agent.agent_id], action_space=action_space) else: return constants.Action.Stop.value def act_with_communication(agent): '''Handles agent's move with communication''' if agent.is_alive: action = agent.act( obs[agent.agent_id], action_space=action_space) if type(action) == int: action = [action] + [0, 0] assert (type(action) == list) return action else: return [constants.Action.Stop.value, 0, 0] ret = [] for agent in agents: if is_communicative: ret.append(act_with_communication(agent)) else: ret.append(act_ex_communication(agent)) return ret
84
123
'''Module to manage and advanced game state''' from collections import defaultdict import numpy as np from . import constants from . import characters from . import utility class ForwardModel(object): """Class for helping with the [forward] modeling of the game state.""" def run(self, num_times, board, agents, bombs, items, flames, is_partially_observable, agent_view_size, action_space, training_agent=None, is_communicative=False): """Run the forward model. Args: num_times: The number of times to run it for. This is a maximum and it will stop early if we reach a done. board: The board state to run it from. agents: The agents to use to run it. bombs: The starting bombs. items: The starting items. flames: The starting flames. is_partially_observable: Whether the board is partially observable or not. Only applies to TeamRadio. agent_view_size: If it's partially observable, then the size of the square that the agent can view. action_space: The actions that each agent can take. training_agent: The training agent to pass to done. is_communicative: Whether the action depends on communication observations as well. Returns: steps: The list of step results, which are each a dict of "obs", "next_obs", "reward", "action". board: Updated board. agents: Updated agents, same models though. bombs: Updated bombs. items: Updated items. flames: Updated flames. done: Whether we completed the game in these steps. info: The result of the game if it's completed. """ steps = [] for _ in num_times: obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) actions = self.act( agents, obs, action_space, is_communicative=is_communicative) board, agents, bombs, items, flames = self.step( actions, board, agents, bombs, items, flames) next_obs = self.get_observations( board, agents, bombs, is_partially_observable, agent_view_size) reward = self.get_rewards(agents, game_type, step_count, max_steps) done = self.get_done(agents, game_type, step_count, max_steps, training_agent) info = self.get_info(done, rewards, game_type, agents) steps.append({ "obs": obs, "next_obs": next_obs, "reward": reward, "actions": actions, }) if done: # Callback to let the agents know that the game has ended. for agent in agents: agent.episode_end(reward[agent.agent_id]) break return steps, board, agents, bombs, items, flames, done, info @staticmethod def act(agents, obs, action_space, is_communicative=False): """Returns actions for each agent in this list. Args: agents: A list of agent objects. obs: A list of matching observations per agent. action_space: The action space for the environment using this model. is_communicative: Whether the action depends on communication observations as well. Returns a list of actions. 
""" def act_ex_communication(agent): '''Handles agent's move without communication''' if agent.is_alive: return agent.act(obs[agent.agent_id], action_space=action_space) else: return constants.Action.Stop.value def act_with_communication(agent): '''Handles agent's move with communication''' if agent.is_alive: action = agent.act( obs[agent.agent_id], action_space=action_space) if type(action) == int: action = [action] + [0, 0] assert (type(action) == list) return action else: return [constants.Action.Stop.value, 0, 0] ret = [] for agent in agents: if is_communicative: ret.append(act_with_communication(agent)) else: ret.append(act_ex_communication(agent)) return ret @staticmethod def step(actions, curr_board, curr_agents, curr_bombs, curr_items, curr_flames, max_blast_strength=10): board_size = len(curr_board) # Tick the flames. Replace any dead ones with passages. If there is an # item there, then reveal that item. flames = [] for flame in curr_flames: position = flame.position if flame.is_dead(): item_value = curr_items.get(position) if item_value: del curr_items[position] else: item_value = constants.Item.Passage.value curr_board[position] = item_value else: flame.tick() flames.append(flame) curr_flames = flames # Redraw all current flames # Multiple flames may share a position and the map should contain # a flame until all flames are dead to avoid issues with bomb # movements and explosions. for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # Step the living agents and moving bombs. # If two agents try to go to the same spot, they should bounce back to # their previous spots. This is complicated with one example being when # there are three agents all in a row. If the one in the middle tries # to go to the left and bounces with the one on the left, and then the # one on the right tried to go to the middle one's position, she should # also bounce. A way of doing this is to gather all the new positions # before taking any actions. Then, if there are disputes, correct those # disputes iteratively. # Additionally, if two agents try to switch spots by moving into each # Figure out desired next position for alive agents alive_agents = [agent for agent in curr_agents if agent.is_alive] desired_agent_positions = [agent.position for agent in alive_agents] for num_agent, agent in enumerate(alive_agents): position = agent.position # We change the curr_board here as a safeguard. We will later # update the agent's new position. curr_board[position] = constants.Item.Passage.value action = actions[agent.agent_id] if action == constants.Action.Stop.value: pass elif action == constants.Action.Bomb.value: position = agent.position if not utility.position_is_bomb(curr_bombs, position): bomb = agent.maybe_lay_bomb() if bomb: curr_bombs.append(bomb) elif utility.is_valid_direction(curr_board, position, action): desired_agent_positions[num_agent] = agent.get_next_position( action) # Gather desired next positions for moving bombs. Handle kicks later. 
desired_bomb_positions = [bomb.position for bomb in curr_bombs] for num_bomb, bomb in enumerate(curr_bombs): curr_board[bomb.position] = constants.Item.Passage.value if bomb.is_moving(): desired_position = utility.get_next_position( bomb.position, bomb.moving_direction) if utility.position_on_board(curr_board, desired_position) \ and not utility.position_is_powerup(curr_board, desired_position) \ and not utility.position_is_wall(curr_board, desired_position): desired_bomb_positions[num_bomb] = desired_position # Position switches: # Agent <-> Agent => revert both to previous position. # Bomb <-> Bomb => revert both to previous position. # Agent <-> Bomb => revert Bomb to previous position. crossings = {} def crossing(current, desired): '''Checks to see if an agent is crossing paths''' current_x, current_y = current desired_x, desired_y = desired if current_x != desired_x: assert current_y == desired_y return ('X', min(current_x, desired_x), current_y) assert current_x == desired_x return ('Y', current_x, min(current_y, desired_y)) for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: desired_position = desired_agent_positions[num_agent] border = crossing(agent.position, desired_position) if border in crossings: # Crossed another agent - revert both to prior positions. desired_agent_positions[num_agent] = agent.position num_agent2, _ = crossings[border] desired_agent_positions[num_agent2] = alive_agents[ num_agent2].position else: crossings[border] = (num_agent, True) for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] != bomb.position: desired_position = desired_bomb_positions[num_bomb] border = crossing(bomb.position, desired_position) if border in crossings: # Crossed - revert to prior position. desired_bomb_positions[num_bomb] = bomb.position num, is_agent = crossings[border] if not is_agent: # Crossed bomb - revert that to prior position as well. desired_bomb_positions[num] = curr_bombs[num].position else: crossings[border] = (num_bomb, False) # Deal with multiple agents or multiple bomb collisions on desired next # position by resetting desired position to current position for # everyone involved in the collision. agent_occupancy = defaultdict(int) bomb_occupancy = defaultdict(int) for desired_position in desired_agent_positions: agent_occupancy[desired_position] += 1 for desired_position in desired_bomb_positions: bomb_occupancy[desired_position] += 1 # Resolve >=2 agents or >=2 bombs trying to occupy the same space. change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Either another agent is going to this position or more than # one bomb is going to this position. In both scenarios, revert # to the original position. if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1): desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position if desired_position != curr_position and \ (bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1): desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 change = True # Handle kicks. 
agent_indexed_by_kicked_bomb = {} kicked_bomb_indexed_by_agent = {} delayed_bomb_updates = [] delayed_agent_updates = [] # Loop through all bombs to see if they need a good kicking or cause # collisions with an agent. for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] if agent_occupancy[desired_position] == 0: # There was never an agent around to kick or collide. continue agent_list = [ (num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \ if desired_position == desired_agent_positions[num_agent]] if not agent_list: # Agents moved from collision. continue # The agent_list should contain a single element at this point. assert (len(agent_list) == 1) num_agent, agent = agent_list[0] if desired_position == agent.position: # Agent did not move if desired_position != bomb.position: # Bomb moved, but agent did not. The bomb should revert # and stop. delayed_bomb_updates.append((num_bomb, bomb.position)) continue # NOTE: At this point, we have that the agent in question tried to # move into this position. if not agent.can_kick: # If we move the agent at this point, then we risk having two # agents on a square in future iterations of the loop. So we # push this change to the next stage instead. delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) continue # Agent moved and can kick - see if the target for the kick never had anyhing on it direction = constants.Action(actions[agent.agent_id]) target_position = utility.get_next_position(desired_position, direction) if utility.position_on_board(curr_board, target_position) and \ agent_occupancy[target_position] == 0 and \ bomb_occupancy[target_position] == 0 and \ not utility.position_is_powerup(curr_board, target_position) and \ not utility.position_is_wall(curr_board, target_position): # Ok to update bomb desired location as we won't iterate over it again here # but we can not update bomb_occupancy on target position and need to check it again # However we need to set the bomb count on the current position to zero so # that the agent can stay on this position. bomb_occupancy[desired_position] = 0 delayed_bomb_updates.append((num_bomb, target_position)) agent_indexed_by_kicked_bomb[num_bomb] = num_agent kicked_bomb_indexed_by_agent[num_agent] = num_bomb bomb.moving_direction = direction # Bombs may still collide and we then need to reverse bomb and agent .. else: delayed_bomb_updates.append((num_bomb, bomb.position)) delayed_agent_updates.append((num_agent, agent.position)) for (num_bomb, bomb_position) in delayed_bomb_updates: desired_bomb_positions[num_bomb] = bomb_position bomb_occupancy[bomb_position] += 1 change = True for (num_agent, agent_position) in delayed_agent_updates: desired_agent_positions[num_agent] = agent_position agent_occupancy[agent_position] += 1 change = True while change: change = False for num_agent, agent in enumerate(alive_agents): desired_position = desired_agent_positions[num_agent] curr_position = agent.position # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if desired_position != curr_position and \ (agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0): # Late collisions resulting from failed kicks force this agent to stay at the # original position. Check if this agent successfully kicked a bomb above and undo # the kick. 
if num_agent in kicked_bomb_indexed_by_agent: num_bomb = kicked_bomb_indexed_by_agent[num_agent] bomb = curr_bombs[num_bomb] desired_bomb_positions[num_bomb] = bomb.position bomb_occupancy[bomb.position] += 1 del agent_indexed_by_kicked_bomb[num_bomb] del kicked_bomb_indexed_by_agent[num_agent] desired_agent_positions[num_agent] = curr_position agent_occupancy[curr_position] += 1 change = True for num_bomb, bomb in enumerate(curr_bombs): desired_position = desired_bomb_positions[num_bomb] curr_position = bomb.position # This bomb may be a boomerang, i.e. it was kicked back to the # original location it moved from. If it is blocked now, it # can't be kicked and the agent needs to move back to stay # consistent with other movements. if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb: continue bomb_occupancy_ = bomb_occupancy[desired_position] agent_occupancy_ = agent_occupancy[desired_position] # Agents and bombs can only share a square if they are both in their # original position (Agent dropped bomb and has not moved) if bomb_occupancy_ > 1 or agent_occupancy_ != 0: desired_bomb_positions[num_bomb] = curr_position bomb_occupancy[curr_position] += 1 num_agent = agent_indexed_by_kicked_bomb.get(num_bomb) if num_agent is not None: agent = alive_agents[num_agent] desired_agent_positions[num_agent] = agent.position agent_occupancy[agent.position] += 1 del kicked_bomb_indexed_by_agent[num_agent] del agent_indexed_by_kicked_bomb[num_bomb] change = True for num_bomb, bomb in enumerate(curr_bombs): if desired_bomb_positions[num_bomb] == bomb.position and \ not num_bomb in agent_indexed_by_kicked_bomb: # Bomb was not kicked this turn and its desired position is its # current location. Stop it just in case it was moving before. bomb.stop() else: # Move bomb to the new position. # NOTE: We already set the moving direction up above. bomb.position = desired_bomb_positions[num_bomb] for num_agent, agent in enumerate(alive_agents): if desired_agent_positions[num_agent] != agent.position: agent.move(actions[agent.agent_id]) if utility.position_is_powerup(curr_board, agent.position): agent.pick_up( constants.Item(curr_board[agent.position]), max_blast_strength=max_blast_strength) # Explode bombs. exploded_map = np.zeros_like(curr_board) has_new_explosions = False for bomb in curr_bombs: bomb.tick() if bomb.exploded(): has_new_explosions = True elif curr_board[bomb.position] == constants.Item.Flames.value: bomb.fire() has_new_explosions = True # Chain the explosions. while has_new_explosions: next_bombs = [] has_new_explosions = False for bomb in curr_bombs: if not bomb.exploded(): next_bombs.append(bomb) continue bomb.bomber.incr_ammo() for _, indices in bomb.explode().items(): for r, c in indices: if not all( [r >= 0, c >= 0, r < board_size, c < board_size]): break if curr_board[r][c] == constants.Item.Rigid.value: break exploded_map[r][c] = 1 if curr_board[r][c] == constants.Item.Wood.value: break curr_bombs = next_bombs for bomb in curr_bombs: if bomb.in_range(exploded_map): bomb.fire() has_new_explosions = True # Update the board's bombs. for bomb in curr_bombs: curr_board[bomb.position] = constants.Item.Bomb.value # Update the board's flames. flame_positions = np.where(exploded_map == 1) for row, col in zip(flame_positions[0], flame_positions[1]): curr_flames.append(characters.Flame((row, col))) for flame in curr_flames: curr_board[flame.position] = constants.Item.Flames.value # Kill agents on flames. Otherwise, update position on curr_board. 
for agent in alive_agents: if curr_board[agent.position] == constants.Item.Flames.value: agent.die() else: curr_board[agent.position] = utility.agent_value(agent.agent_id) return curr_board, curr_agents, curr_bombs, curr_items, curr_flames def get_observations(self, curr_board, agents, bombs, is_partially_observable, agent_view_size, game_type, game_env): """Gets the observations as an np.array of the visible squares. The agent gets to choose whether it wants to keep the fogged part in memory. """ board_size = len(curr_board) def make_bomb_maps(position): ''' Makes an array of an agents bombs and the bombs attributes ''' blast_strengths = np.zeros((board_size, board_size)) life = np.zeros((board_size, board_size)) for bomb in bombs: x, y = bomb.position if not is_partially_observable \ or in_view_range(position, x, y): blast_strengths[(x, y)] = bomb.blast_strength life[(x, y)] = bomb.life return blast_strengths, life def in_view_range(position, v_row, v_col): '''Checks to see if a tile is in an agents viewing area''' row, col = position return all([ row >= v_row - agent_view_size, row <= v_row + agent_view_size, col >= v_col - agent_view_size, col <= v_col + agent_view_size ]) attrs = [ 'position', 'blast_strength', 'can_kick', 'teammate', 'ammo', 'enemies' ] alive_agents = [ utility.agent_value(agent.agent_id) for agent in agents if agent.is_alive ] observations = [] for agent in agents: agent_obs = {'alive': alive_agents} board = curr_board if is_partially_observable: board = board.copy() for row in range(board_size): for col in range(board_size): if not in_view_range(agent.position, row, col): board[row, col] = constants.Item.Fog.value agent_obs['board'] = board bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position) agent_obs['bomb_blast_strength'] = bomb_blast_strengths agent_obs['bomb_life'] = bomb_life agent_obs['game_type'] = game_type.value agent_obs['game_env'] = game_env for attr in attrs: assert hasattr(agent, attr) agent_obs[attr] = getattr(agent, attr) observations.append(agent_obs) return observations @staticmethod def get_done(agents, step_count, max_steps, game_type, training_agent): # print('get_done called...', training_agent) alive = [agent for agent in agents if agent.is_alive] alive_ids = sorted([agent.agent_id for agent in alive]) if step_count >= max_steps: print('gameover : max timestep over') return True elif game_type == constants.GameType.FFA: if training_agent is not None and training_agent not in alive_ids: print('gameover : ffa training_agent has died') return True if len(alive) <= 1: print('checkout : ffa only %s player survived' % len(alive)) return len(alive) <= 1 elif len(alive_ids) <= 1: print('gameover : only one player survived') return True elif alive_ids == [0, 2]: print('gameover : [0,2] team won') return True elif any([ alive_ids == [1, 3] ]): print('gameover : [1,3] team won') return True return False @staticmethod def get_info(done, rewards, game_type, agents): if game_type == constants.GameType.FFA: alive = [agent for agent in agents if agent.is_alive] if done: if len(alive) != 1: # Either we have more than 1 alive (reached max steps) or # we have 0 alive (last agents died at the same time). return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1] } else: return { 'result': constants.Result.Incomplete, } elif done: # We are playing a team game. 
if rewards == [-1] * 4: return { 'result': constants.Result.Tie, } else: return { 'result': constants.Result.Win, 'winners': [num for num, reward in enumerate(rewards) \ if reward == 1], } else: return { 'result': constants.Result.Incomplete, } @staticmethod def get_rewards(agents, game_type, step_count, max_steps): print('get_rewards called..', self.training_agent) def any_lst_equal(lst, values): '''Checks if list are equal''' return any([lst == v for v in values]) alive_agents = [num for num, agent in enumerate(agents) \ if agent.is_alive] if game_type == constants.GameType.FFA: if len(alive_agents) == 1: # An agent won. Give them +1, others -1. return [2 * int(agent.is_alive) - 1 for agent in agents] elif step_count >= max_steps: # Game is over from time. Everyone gets -1. return [-1] * 4 else: # Game running: 0 for alive, -1 for dead. return [int(agent.is_alive) - 1 for agent in agents] else: # We are playing a team game. if any_lst_equal(alive_agents, [[0, 2], [0], [2]]): # Team [0, 2] wins. return [1, -1, 1, -1] elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]): # Team [1, 3] wins. return [-1, 1, -1, 1] elif step_count >= max_steps: # Game is over by max_steps. All agents tie. return [-1] * 4 elif len(alive_agents) == 0: # Everyone's dead. All agents tie. return [-1] * 4 else: # No team has yet won or lost. return [0] * 4
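Referring back to the `act` implementation above: it dispatches to each agent's own policy, substitutes Stop for dead agents, and pads every action to a three-element list when `is_communicative` is set. A rough usage sketch follows; the `StubAgent` class, the placeholder observations, and the import path are assumptions made only for illustration:

# Rough usage sketch for ForwardModel.act. StubAgent is invented for
# illustration; real agents expose the same agent_id/is_alive/act API.
from pommerman.forward_model import ForwardModel   # hypothetical import path

class StubAgent:
    def __init__(self, agent_id, is_alive=True):
        self.agent_id = agent_id
        self.is_alive = is_alive

    def act(self, obs, action_space):
        return 1          # a real policy would inspect `obs`; 1 is Up here

agents = [StubAgent(0), StubAgent(1, is_alive=False)]
obs = [{'board': None}, {'board': None}]            # placeholder observations

# Without communication, the dead agent is forced to Stop (value 0).
print(ForwardModel.act(agents, obs, action_space=None))
# expected: [1, 0]

# With communication, each entry becomes [move, msg_1, msg_2].
print(ForwardModel.act(agents, obs, action_space=None, is_communicative=True))
# expected: [[1, 0, 0], [0, 0, 0]]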
get_index_page
Handle requests whose URLs don't end with '.html' (for example, '/doc/'). We don't need any generator here, because such URLs are equivalent to the same URLs with 'index.html' appended. :param page_path: str :return: str
import copy import datetime import glob import json import os import sys import threading from os import path from urllib.parse import urlparse, urljoin, ParseResult import xmltodict import yaml from bs4 import BeautifulSoup from flask import Flask, render_template, Response, send_from_directory, request from flask.views import View from flask.helpers import url_for, send_file, make_response from flask_frozen import Freezer, walk_directory from hashlib import md5 from yaml import FullLoader from src.Feature import Feature from src.dist import get_dist_pages from src.github import assert_valid_git_hub_url from src.navigation import process_video_nav, process_nav, get_current_url from src.api import get_api_page from src.encoder import DateAwareEncoder from src.externals import process_nav_includes from src.grammar import get_grammar from src.markdown.makrdown import jinja_aware_markdown from src.pages.MyFlatPages import MyFlatPages from src.pdf import generate_pdf from src.processors.processors import process_code_blocks from src.processors.processors import set_replace_simple_code from src.search import build_search_indices from src.sitemap import generate_sitemap, generate_temporary_sitemap from src.ktl_components import KTLComponentExtension app = Flask(__name__, static_folder='_assets') app.config.from_pyfile('mysettings.py') app.jinja_env.trim_blocks = True app.jinja_env.lstrip_blocks = True pages = MyFlatPages(app) freezer = Freezer(app) ignore_stdlib = False build_mode = False build_contenteditable = False build_check_links = True build_errors = [] url_adapter = app.create_url_adapter(None) root_folder = path.join(os.path.dirname(__file__)) data_folder = path.join(os.path.dirname(__file__), "data") _nav_cache = None _nav_lock = threading.RLock() _cached_asset_version = {} def get_asset_version(filename): if filename in _cached_asset_version: return _cached_asset_version[filename] filepath = (root_folder if root_folder else ".") + filename if filename and path.exists(filepath): with open(filepath, 'rb') as file: digest = md5(file.read()).hexdigest() _cached_asset_version[filename] = digest return digest return None def get_site_data(): data = {} for data_file in os.listdir(data_folder): if data_file.startswith('_'): continue if not data_file.endswith(".yml"): continue data_file_path = path.join(data_folder, data_file) with open(data_file_path, encoding="UTF-8") as stream: try: file_name_without_extension = data_file[:-4] if data_file.endswith(".yml") else data_file data[file_name_without_extension] = yaml.load(stream, Loader=FullLoader) except yaml.YAMLError as exc: sys.stderr.write('Cant parse data file ' + data_file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) except IOError as exc: sys.stderr.write('Cant read data file ' + data_file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) return data site_data = get_site_data() def get_nav(): global _nav_cache global _nav_lock with _nav_lock: if _nav_cache is not None: nav = _nav_cache else: nav = get_nav_impl() nav = copy.deepcopy(nav) if build_mode: _nav_cache = copy.deepcopy(nav) # NOTE. 
This call depends on `request.path`, cannot cache process_nav(request.path, nav) return nav def get_nav_impl(): with open(path.join(data_folder, "_nav.yml")) as stream: nav = yaml.load(stream, Loader=FullLoader) nav = process_nav_includes(build_mode, nav) return nav def get_kotlin_features(): features_dir = path.join(os.path.dirname(__file__), "kotlin-features") features = [] for feature_meta in yaml.load(open(path.join(features_dir, "kotlin-features.yml"))): file_path = path.join(features_dir, feature_meta['content_file']) with open(file_path, encoding='utf-8') as f: content = f.read() content = content.replace("\r\n", "\n") if file_path.endswith(".md"): html_content = BeautifulSoup(jinja_aware_markdown(content, pages), 'html.parser') content = process_code_blocks(html_content) features.append(Feature(content, feature_meta)) return features @app.context_processor def add_year_to_context(): return { 'year': datetime.datetime.now().year } app.jinja_env.add_extension(KTLComponentExtension) @app.context_processor def add_data_to_context(): nav = get_nav() return { 'nav': nav, 'data': site_data, 'site': { 'pdf_url': app.config['PDF_URL'], 'forum_url': app.config['FORUM_URL'], 'site_github_url': app.config['SITE_GITHUB_URL'], 'data': site_data, 'text_using_gradle': app.config['TEXT_USING_GRADLE'], 'code_baseurl': app.config['CODE_URL'], 'contenteditable': build_contenteditable }, 'headerCurrentUrl': get_current_url(nav['subnav']['content']) } @app.template_filter('get_domain') def get_domain(url): return urlparse(url).netloc app.jinja_env.globals['get_domain'] = get_domain @app.template_filter('split_chunk') def split_chunk(list, size): return [list[i:i+size] for i in range(len(list))[::size]] app.jinja_env.globals['split_chunk'] = split_chunk @app.template_filter('autoversion') def autoversion_filter(filename): asset_version = get_asset_version(filename) if asset_version is None: return filename original = urlparse(filename)._asdict() original.update(query=original.get('query') + '&v=' + asset_version) return ParseResult(**original).geturl() @app.route('/data/events.json') def get_events(): with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file: events = xmltodict.parse(events_file.read())['events']['event'] return Response(json.dumps(events, cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/cities.json') def get_cities(): return Response(json.dumps(site_data['cities'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/kotlinconf.json') def get_kotlinconf(): return Response(json.dumps(site_data['kotlinconf'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/universities.json') def get_universities(): return Response(json.dumps(site_data['universities'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/user-groups.json') def get_user_groups(): return Response(json.dumps(site_data['user-groups'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/docs/reference/grammar.html') def grammar(): grammar = get_grammar(build_mode) if grammar is None: return "Grammar file not found", 404 return render_template('pages/grammar.html', kotlinGrammar=grammar) @app.route('/docs/videos.html') def videos_page(): return render_template('pages/videos.html', videos=process_video_nav(site_data['videos'])) @app.route('/docs/kotlin-reference.pdf') def kotlin_reference_pdf(): return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf")) @app.route('/docs/kotlin-docs.pdf') def 
kotlin_docs_pdf(): return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf")) @app.route('/community/') def community_page(): return render_template('pages/community.html') @app.route('/user-groups/user-group-list.html') def user_group_list(): return render_template( 'pages/user-groups/user-group-list.html', user_groups_data=site_data['user-groups'], number_of_groups=sum(map(lambda section: len(section['groups']), site_data['user-groups']))) @app.route('/education/') def education_page(): return render_template('pages/education/index.html') @app.route('/') def index_page(): features = get_kotlin_features() return render_template('pages/index.html', is_index_page=True, features=features ) def process_page(page_path): # get_nav() has side effect to copy and patch files from the `external` folder # under site folder. We need it for dev mode to make sure file is up-to-date # TODO: extract get_nav and implement the explicit way to avoid side-effects get_nav() page = pages.get_or_404(page_path) if 'redirect_path' in page.meta and page.meta['redirect_path'] is not None: page_path = page.meta['redirect_path'] if page_path.startswith('https://') or page_path.startswith('http://'): return render_template('redirect.html', url=page_path) else: return render_template('redirect.html', url=url_for('page', page_path = page_path)) if 'date' in page.meta and page['date'] is not None: page.meta['formatted_date'] = page.meta['date'].strftime('%d %B %Y') if page.meta['formatted_date'].startswith('0'): page.meta['formatted_date'] = page.meta['formatted_date'][1:] if 'github_edit_url' in page.meta: edit_on_github_url = page.meta['github_edit_url'] else: edit_on_github_url = app.config['EDIT_ON_GITHUB_URL'] + app.config['FLATPAGES_ROOT'] + "/" + page_path + \ app.config['FLATPAGES_EXTENSION'] assert_valid_git_hub_url(edit_on_github_url, page_path) template = page.meta["layout"] if 'layout' in page.meta else 'default.html' if not template.endswith(".html"): template += ".html" if build_check_links: validate_links_weak(page, page_path) return render_template( template, page=page, baseurl="", edit_on_github_url=edit_on_github_url, ) def validate_links_weak(page, page_path): for link in page.parsed_html.select('a'): if 'href' not in link.attrs: continue href = urlparse(urljoin('/' + page_path, link['href'])) if href.scheme != '': continue endpoint, params = url_adapter.match(href.path, 'GET', query_args={}) if endpoint != 'page' and endpoint != 'get_index_page': response = app.test_client().get(href.path) if response.status_code == 404: build_errors.append("Broken link: " + str(href.path) + " on page " + page_path) continue referenced_page = pages.get(params['page_path']) if referenced_page is None: build_errors.append("Broken link: " + str(href.path) + " on page " + page_path) continue if href.fragment == '': continue ids = [] for x in referenced_page.parsed_html.select('h1,h2,h3,h4'): try: ids.append(x['id']) except KeyError: pass for x in referenced_page.parsed_html.select('a'): try: ids.append(x['name']) except KeyError: pass if href.fragment not in ids: build_errors.append("Bad anchor: " + str(href.fragment) + " on page " + page_path) if not build_mode and len(build_errors) > 0: errors_copy = [] for item in build_errors: errors_copy.append(item) build_errors.clear() raise Exception("Validation errors " + str(len(errors_copy)) + ":\n\n" + "\n".join(str(item) for item in errors_copy)) @freezer.register_generator def page(): for page in pages: yield {'page_path': page.path} 
@app.route('/<path:page_path>.html') def page(page_path): return process_page(page_path) @app.route('/404.html') def page_404(): return render_template('pages/404.html') @freezer.register_generator def api_page(): api_folder = path.join(root_folder, 'api') for root, dirs, files in os.walk(api_folder): for file in files: yield {'page_path': path.join(path.relpath(root, api_folder), file).replace(os.sep, '/')} class RedirectTemplateView(View): def __init__(self, url): self.redirect_url = url def dispatch_request(self): return render_template('redirect.html', url=self.redirect_url) def generate_redirect_pages(): redirects_folder = path.join(root_folder, 'redirects') for root, dirs, files in os.walk(redirects_folder): for file in files: if not file.endswith(".yml"): continue redirects_file_path = path.join(redirects_folder, file) with open(redirects_file_path, encoding="UTF-8") as stream: try: redirects = yaml.load(stream, Loader=FullLoader) for entry in redirects: url_to = entry["to"] url_from = entry["from"] url_list = url_from if isinstance(url_from, list) else [url_from] for url in url_list: app.add_url_rule(url, view_func=RedirectTemplateView.as_view(url, url=url_to)) except yaml.YAMLError as exc: sys.stderr.write('Cant parse data file ' + file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) except IOError as exc: sys.stderr.write('Cant read data file ' + file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) @app.errorhandler(404) def page_not_found(e): return render_template('pages/404.html'), 404 app.register_error_handler(404, page_not_found) @app.route('/api/<path:page_path>') def api_page(page_path): path_other, ext = path.splitext(page_path) if ext == '.html': return process_api_page(page_path[:-5]) elif path.basename(page_path) == "package-list" or ext: return respond_with_package_list(page_path) elif not page_path.endswith('/'): page_path += '/' return process_api_page(page_path + 'index') def process_api_page(page_path): return render_template( 'api.html', page=get_api_page(build_mode, page_path) ) def respond_with_package_list(page_path): file_path = path.join(root_folder, 'api', page_path) if not path.exists(file_path): return make_response(path.basename(page_path) + " not found", 404) return send_file(file_path, mimetype="text/plain") @app.route('/assets/<path:path>') def asset(path): return send_from_directory('assets', path) @app.route('/assets/images/tutorials/<path:filename>') def tutorial_img(filename): return send_from_directory(path.join('assets', 'images', 'tutorials'), filename) @freezer.register_generator def asset(): for filename in walk_directory(path.join(root_folder, "assets")): yield {'path': filename} # MASKED: get_index_page function (lines 468-481) generate_redirect_pages() @app.after_request def add_header(request): request.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" request.headers["Pragma"] = "no-cache" request.headers["Expires"] = "0" request.headers['Cache-Control'] = 'public, max-age=0' return request if __name__ == '__main__': print("\n\n\nRunning new KotlinWebSite generator/dev-mode:\n") argv_copy = [] for arg in sys.argv: print("arg: " + arg) if arg == "--ignore-stdlib": ignore_stdlib = True elif arg == "--no-check-links": build_check_links = False elif arg == "--editable": build_contenteditable = True else: argv_copy.append(arg) print("\n\n") print("ignore_stdlib: " + str(ignore_stdlib)) print("build_check_links: " + str(build_check_links)) print("build_contenteditable: " + str(build_contenteditable)) print("\n\n") 
set_replace_simple_code(build_contenteditable) with (open(path.join(root_folder, "_nav-mapped.yml"), 'w')) as output: yaml.dump(get_nav_impl(), output) if len(argv_copy) > 1: if argv_copy[1] == "build": build_mode = True urls = freezer.freeze() if len(build_errors) > 0: for error in build_errors: sys.stderr.write(error + '\n') sys.exit(-1) elif argv_copy[1] == "sitemap": generate_sitemap(get_dist_pages()) # temporary sitemap generate_temporary_sitemap() elif argv_copy[1] == "index": build_search_indices(get_dist_pages()) elif argv_copy[1] == "reference-pdf": generate_pdf("kotlin-docs.pdf", site_data) else: print("Unknown argument: " + argv_copy[1]) sys.exit(1) else: app.run(host="0.0.0.0", debug=True, threaded=True, **{"extra_files": { '/src/data/_nav.yml', *glob.glob("/src/pages-includes/**/*", recursive=True), }})
@app.route('/<path:page_path>') def get_index_page(page_path): """ Handle requests which urls don't end with '.html' (for example, '/doc/') We don't need any generator here, because such urls are equivalent to the same urls with 'index.html' at the end. :param page_path: str :return: str """ if not page_path.endswith('/'): page_path += '/' return process_page(page_path + 'index')
468
481
import copy import datetime import glob import json import os import sys import threading from os import path from urllib.parse import urlparse, urljoin, ParseResult import xmltodict import yaml from bs4 import BeautifulSoup from flask import Flask, render_template, Response, send_from_directory, request from flask.views import View from flask.helpers import url_for, send_file, make_response from flask_frozen import Freezer, walk_directory from hashlib import md5 from yaml import FullLoader from src.Feature import Feature from src.dist import get_dist_pages from src.github import assert_valid_git_hub_url from src.navigation import process_video_nav, process_nav, get_current_url from src.api import get_api_page from src.encoder import DateAwareEncoder from src.externals import process_nav_includes from src.grammar import get_grammar from src.markdown.makrdown import jinja_aware_markdown from src.pages.MyFlatPages import MyFlatPages from src.pdf import generate_pdf from src.processors.processors import process_code_blocks from src.processors.processors import set_replace_simple_code from src.search import build_search_indices from src.sitemap import generate_sitemap, generate_temporary_sitemap from src.ktl_components import KTLComponentExtension app = Flask(__name__, static_folder='_assets') app.config.from_pyfile('mysettings.py') app.jinja_env.trim_blocks = True app.jinja_env.lstrip_blocks = True pages = MyFlatPages(app) freezer = Freezer(app) ignore_stdlib = False build_mode = False build_contenteditable = False build_check_links = True build_errors = [] url_adapter = app.create_url_adapter(None) root_folder = path.join(os.path.dirname(__file__)) data_folder = path.join(os.path.dirname(__file__), "data") _nav_cache = None _nav_lock = threading.RLock() _cached_asset_version = {} def get_asset_version(filename): if filename in _cached_asset_version: return _cached_asset_version[filename] filepath = (root_folder if root_folder else ".") + filename if filename and path.exists(filepath): with open(filepath, 'rb') as file: digest = md5(file.read()).hexdigest() _cached_asset_version[filename] = digest return digest return None def get_site_data(): data = {} for data_file in os.listdir(data_folder): if data_file.startswith('_'): continue if not data_file.endswith(".yml"): continue data_file_path = path.join(data_folder, data_file) with open(data_file_path, encoding="UTF-8") as stream: try: file_name_without_extension = data_file[:-4] if data_file.endswith(".yml") else data_file data[file_name_without_extension] = yaml.load(stream, Loader=FullLoader) except yaml.YAMLError as exc: sys.stderr.write('Cant parse data file ' + data_file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) except IOError as exc: sys.stderr.write('Cant read data file ' + data_file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) return data site_data = get_site_data() def get_nav(): global _nav_cache global _nav_lock with _nav_lock: if _nav_cache is not None: nav = _nav_cache else: nav = get_nav_impl() nav = copy.deepcopy(nav) if build_mode: _nav_cache = copy.deepcopy(nav) # NOTE. 
This call depends on `request.path`, cannot cache process_nav(request.path, nav) return nav def get_nav_impl(): with open(path.join(data_folder, "_nav.yml")) as stream: nav = yaml.load(stream, Loader=FullLoader) nav = process_nav_includes(build_mode, nav) return nav def get_kotlin_features(): features_dir = path.join(os.path.dirname(__file__), "kotlin-features") features = [] for feature_meta in yaml.load(open(path.join(features_dir, "kotlin-features.yml"))): file_path = path.join(features_dir, feature_meta['content_file']) with open(file_path, encoding='utf-8') as f: content = f.read() content = content.replace("\r\n", "\n") if file_path.endswith(".md"): html_content = BeautifulSoup(jinja_aware_markdown(content, pages), 'html.parser') content = process_code_blocks(html_content) features.append(Feature(content, feature_meta)) return features @app.context_processor def add_year_to_context(): return { 'year': datetime.datetime.now().year } app.jinja_env.add_extension(KTLComponentExtension) @app.context_processor def add_data_to_context(): nav = get_nav() return { 'nav': nav, 'data': site_data, 'site': { 'pdf_url': app.config['PDF_URL'], 'forum_url': app.config['FORUM_URL'], 'site_github_url': app.config['SITE_GITHUB_URL'], 'data': site_data, 'text_using_gradle': app.config['TEXT_USING_GRADLE'], 'code_baseurl': app.config['CODE_URL'], 'contenteditable': build_contenteditable }, 'headerCurrentUrl': get_current_url(nav['subnav']['content']) } @app.template_filter('get_domain') def get_domain(url): return urlparse(url).netloc app.jinja_env.globals['get_domain'] = get_domain @app.template_filter('split_chunk') def split_chunk(list, size): return [list[i:i+size] for i in range(len(list))[::size]] app.jinja_env.globals['split_chunk'] = split_chunk @app.template_filter('autoversion') def autoversion_filter(filename): asset_version = get_asset_version(filename) if asset_version is None: return filename original = urlparse(filename)._asdict() original.update(query=original.get('query') + '&v=' + asset_version) return ParseResult(**original).geturl() @app.route('/data/events.json') def get_events(): with open(path.join(data_folder, "events.xml"), encoding="UTF-8") as events_file: events = xmltodict.parse(events_file.read())['events']['event'] return Response(json.dumps(events, cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/cities.json') def get_cities(): return Response(json.dumps(site_data['cities'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/kotlinconf.json') def get_kotlinconf(): return Response(json.dumps(site_data['kotlinconf'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/universities.json') def get_universities(): return Response(json.dumps(site_data['universities'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/data/user-groups.json') def get_user_groups(): return Response(json.dumps(site_data['user-groups'], cls=DateAwareEncoder), mimetype='application/json') @app.route('/docs/reference/grammar.html') def grammar(): grammar = get_grammar(build_mode) if grammar is None: return "Grammar file not found", 404 return render_template('pages/grammar.html', kotlinGrammar=grammar) @app.route('/docs/videos.html') def videos_page(): return render_template('pages/videos.html', videos=process_video_nav(site_data['videos'])) @app.route('/docs/kotlin-reference.pdf') def kotlin_reference_pdf(): return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf")) @app.route('/docs/kotlin-docs.pdf') def 
kotlin_docs_pdf(): return send_file(path.join(root_folder, "assets", "kotlin-reference.pdf")) @app.route('/community/') def community_page(): return render_template('pages/community.html') @app.route('/user-groups/user-group-list.html') def user_group_list(): return render_template( 'pages/user-groups/user-group-list.html', user_groups_data=site_data['user-groups'], number_of_groups=sum(map(lambda section: len(section['groups']), site_data['user-groups']))) @app.route('/education/') def education_page(): return render_template('pages/education/index.html') @app.route('/') def index_page(): features = get_kotlin_features() return render_template('pages/index.html', is_index_page=True, features=features ) def process_page(page_path): # get_nav() has side effect to copy and patch files from the `external` folder # under site folder. We need it for dev mode to make sure file is up-to-date # TODO: extract get_nav and implement the explicit way to avoid side-effects get_nav() page = pages.get_or_404(page_path) if 'redirect_path' in page.meta and page.meta['redirect_path'] is not None: page_path = page.meta['redirect_path'] if page_path.startswith('https://') or page_path.startswith('http://'): return render_template('redirect.html', url=page_path) else: return render_template('redirect.html', url=url_for('page', page_path = page_path)) if 'date' in page.meta and page['date'] is not None: page.meta['formatted_date'] = page.meta['date'].strftime('%d %B %Y') if page.meta['formatted_date'].startswith('0'): page.meta['formatted_date'] = page.meta['formatted_date'][1:] if 'github_edit_url' in page.meta: edit_on_github_url = page.meta['github_edit_url'] else: edit_on_github_url = app.config['EDIT_ON_GITHUB_URL'] + app.config['FLATPAGES_ROOT'] + "/" + page_path + \ app.config['FLATPAGES_EXTENSION'] assert_valid_git_hub_url(edit_on_github_url, page_path) template = page.meta["layout"] if 'layout' in page.meta else 'default.html' if not template.endswith(".html"): template += ".html" if build_check_links: validate_links_weak(page, page_path) return render_template( template, page=page, baseurl="", edit_on_github_url=edit_on_github_url, ) def validate_links_weak(page, page_path): for link in page.parsed_html.select('a'): if 'href' not in link.attrs: continue href = urlparse(urljoin('/' + page_path, link['href'])) if href.scheme != '': continue endpoint, params = url_adapter.match(href.path, 'GET', query_args={}) if endpoint != 'page' and endpoint != 'get_index_page': response = app.test_client().get(href.path) if response.status_code == 404: build_errors.append("Broken link: " + str(href.path) + " on page " + page_path) continue referenced_page = pages.get(params['page_path']) if referenced_page is None: build_errors.append("Broken link: " + str(href.path) + " on page " + page_path) continue if href.fragment == '': continue ids = [] for x in referenced_page.parsed_html.select('h1,h2,h3,h4'): try: ids.append(x['id']) except KeyError: pass for x in referenced_page.parsed_html.select('a'): try: ids.append(x['name']) except KeyError: pass if href.fragment not in ids: build_errors.append("Bad anchor: " + str(href.fragment) + " on page " + page_path) if not build_mode and len(build_errors) > 0: errors_copy = [] for item in build_errors: errors_copy.append(item) build_errors.clear() raise Exception("Validation errors " + str(len(errors_copy)) + ":\n\n" + "\n".join(str(item) for item in errors_copy)) @freezer.register_generator def page(): for page in pages: yield {'page_path': page.path} 
@app.route('/<path:page_path>.html') def page(page_path): return process_page(page_path) @app.route('/404.html') def page_404(): return render_template('pages/404.html') @freezer.register_generator def api_page(): api_folder = path.join(root_folder, 'api') for root, dirs, files in os.walk(api_folder): for file in files: yield {'page_path': path.join(path.relpath(root, api_folder), file).replace(os.sep, '/')} class RedirectTemplateView(View): def __init__(self, url): self.redirect_url = url def dispatch_request(self): return render_template('redirect.html', url=self.redirect_url) def generate_redirect_pages(): redirects_folder = path.join(root_folder, 'redirects') for root, dirs, files in os.walk(redirects_folder): for file in files: if not file.endswith(".yml"): continue redirects_file_path = path.join(redirects_folder, file) with open(redirects_file_path, encoding="UTF-8") as stream: try: redirects = yaml.load(stream, Loader=FullLoader) for entry in redirects: url_to = entry["to"] url_from = entry["from"] url_list = url_from if isinstance(url_from, list) else [url_from] for url in url_list: app.add_url_rule(url, view_func=RedirectTemplateView.as_view(url, url=url_to)) except yaml.YAMLError as exc: sys.stderr.write('Cant parse data file ' + file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) except IOError as exc: sys.stderr.write('Cant read data file ' + file + ': ') sys.stderr.write(str(exc)) sys.exit(-1) @app.errorhandler(404) def page_not_found(e): return render_template('pages/404.html'), 404 app.register_error_handler(404, page_not_found) @app.route('/api/<path:page_path>') def api_page(page_path): path_other, ext = path.splitext(page_path) if ext == '.html': return process_api_page(page_path[:-5]) elif path.basename(page_path) == "package-list" or ext: return respond_with_package_list(page_path) elif not page_path.endswith('/'): page_path += '/' return process_api_page(page_path + 'index') def process_api_page(page_path): return render_template( 'api.html', page=get_api_page(build_mode, page_path) ) def respond_with_package_list(page_path): file_path = path.join(root_folder, 'api', page_path) if not path.exists(file_path): return make_response(path.basename(page_path) + " not found", 404) return send_file(file_path, mimetype="text/plain") @app.route('/assets/<path:path>') def asset(path): return send_from_directory('assets', path) @app.route('/assets/images/tutorials/<path:filename>') def tutorial_img(filename): return send_from_directory(path.join('assets', 'images', 'tutorials'), filename) @freezer.register_generator def asset(): for filename in walk_directory(path.join(root_folder, "assets")): yield {'path': filename} @app.route('/<path:page_path>') def get_index_page(page_path): """ Handle requests which urls don't end with '.html' (for example, '/doc/') We don't need any generator here, because such urls are equivalent to the same urls with 'index.html' at the end. 
:param page_path: str :return: str """ if not page_path.endswith('/'): page_path += '/' return process_page(page_path + 'index') generate_redirect_pages() @app.after_request def add_header(request): request.headers["Cache-Control"] = "no-cache, no-store, must-revalidate" request.headers["Pragma"] = "no-cache" request.headers["Expires"] = "0" request.headers['Cache-Control'] = 'public, max-age=0' return request if __name__ == '__main__': print("\n\n\nRunning new KotlinWebSite generator/dev-mode:\n") argv_copy = [] for arg in sys.argv: print("arg: " + arg) if arg == "--ignore-stdlib": ignore_stdlib = True elif arg == "--no-check-links": build_check_links = False elif arg == "--editable": build_contenteditable = True else: argv_copy.append(arg) print("\n\n") print("ignore_stdlib: " + str(ignore_stdlib)) print("build_check_links: " + str(build_check_links)) print("build_contenteditable: " + str(build_contenteditable)) print("\n\n") set_replace_simple_code(build_contenteditable) with (open(path.join(root_folder, "_nav-mapped.yml"), 'w')) as output: yaml.dump(get_nav_impl(), output) if len(argv_copy) > 1: if argv_copy[1] == "build": build_mode = True urls = freezer.freeze() if len(build_errors) > 0: for error in build_errors: sys.stderr.write(error + '\n') sys.exit(-1) elif argv_copy[1] == "sitemap": generate_sitemap(get_dist_pages()) # temporary sitemap generate_temporary_sitemap() elif argv_copy[1] == "index": build_search_indices(get_dist_pages()) elif argv_copy[1] == "reference-pdf": generate_pdf("kotlin-docs.pdf", site_data) else: print("Unknown argument: " + argv_copy[1]) sys.exit(1) else: app.run(host="0.0.0.0", debug=True, threaded=True, **{"extra_files": { '/src/data/_nav.yml', *glob.glob("/src/pages-includes/**/*", recursive=True), }})
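Note on the `add_header` hook near the end of the file above: Flask passes the outgoing response object to `@app.after_request` handlers (the parameter is named `request` in the source), and the hook assigns `Cache-Control` twice, so only the final 'public, max-age=0' value is actually sent. A minimal corrected sketch, keeping the no-cache intent; the handler name `add_no_cache_headers` and the stand-in `app` are illustrative, not part of the original file:

from flask import Flask

app = Flask(__name__)  # stand-in for the app configured in the file above


@app.after_request
def add_no_cache_headers(response):
    # after_request handlers receive and must return the response object.
    # Set a single, consistent Cache-Control policy instead of overwriting it.
    response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    response.headers["Pragma"] = "no-cache"
    response.headers["Expires"] = "0"
    return response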
mean_precision_k
Mean precision at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

Returns
-------
mean precision @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np # MASKED: mean_precision_k function (lines 12-32) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def mean_precision_k(y_true, y_score, k=10):
    """Mean precision at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).

    y_score : array-like, shape = [n_samples]
        Predicted scores.

    k : int
        Rank.

    Returns
    -------
    mean precision @k : float
    """
    p_ks = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            p_ks.append(ranking_precision_score(y_t, y_s, k=k))

    return np.mean(p_ks)
12
32
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
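Although the docstring above describes `y_true` and `y_score` as `[n_samples]`, `mean_precision_k` iterates over rows and scores each row with `ranking_precision_score`, so in practice both arguments are 2-D (one row of relevance labels and one row of scores per sample). A small sanity check, assuming the metrics module above has been imported; the toy labels and scores are made up for illustration:

import numpy as np

# Two samples, five candidate items each; 1 marks a relevant item.
y_true = np.array([[1, 0, 1, 0, 0],
                   [0, 1, 0, 0, 1]])
# Higher score means the item is ranked earlier.
y_score = np.array([[0.9, 0.2, 0.8, 0.1, 0.3],
                    [0.1, 0.7, 0.2, 0.6, 0.9]])

# Sample 1 ranks items 0 and 2 on top (both relevant), sample 2 ranks
# items 4 and 1 on top (both relevant), so precision@2 is 1.0 for each.
print(mean_precision_k(y_true, y_score, k=2))  # 1.0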
mean_recall_k
Mean recall at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

Returns
-------
mean recall @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) # MASKED: mean_recall_k function (lines 35-55) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def mean_recall_k(y_true, y_score, k=10):
    """Mean recall at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).

    y_score : array-like, shape = [n_samples]
        Predicted scores.

    k : int
        Rank.

    Returns
    -------
    mean recall @k : float
    """
    r_ks = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            r_ks.append(ranking_recall_score(y_t, y_s, k=k))

    return np.mean(r_ks)
35
55
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
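`mean_recall_k` follows the same per-row pattern, but `ranking_recall_score` divides by the total number of positives rather than by k, so values stay below 1.0 whenever fewer than all relevant items fit in the top k. A short example under the same assumptions as above (toy data for illustration only):

import numpy as np

y_true = np.array([[1, 0, 1, 1, 0]])
y_score = np.array([[0.9, 0.8, 0.1, 0.7, 0.2]])

# The top-2 items by score are 0 and 1; only item 0 is relevant,
# and there are 3 relevant items overall, so recall@2 = 1/3.
print(mean_recall_k(y_true, y_score, k=2))  # 0.333...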
mean_ndcg_score
Normalized discounted cumulative gain (NDCG) at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

gains : str
    Whether gains should be "exponential" (default) or "linear".

Returns
-------
Mean NDCG @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) # MASKED: mean_ndcg_score function (lines 58-80) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"):
    """Normalized discounted cumulative gain (NDCG) at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).

    y_score : array-like, shape = [n_samples]
        Predicted scores.

    k : int
        Rank.

    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    Mean NDCG @k : float
    """
    ndcg_s = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains))

    return np.mean(ndcg_s)
58
80
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
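With the default exponential gains, `dcg_score` uses gain 2**rel - 1 and discount log2(rank + 1), and `ndcg_score` normalizes by the DCG of the ideal ordering. A worked example, again assuming the metrics module above is in scope and using made-up data:

import numpy as np

y_true = np.array([[1, 0, 1, 0]])
y_score = np.array([[0.4, 0.9, 0.8, 0.1]])

# Ranking by score puts items 1, 2, 0 in the top 3 with relevances 0, 1, 1:
#   DCG@3  = 0/log2(2) + 1/log2(3) + 1/log2(4)
#   IDCG@3 = 1/log2(2) + 1/log2(3)   (both relevant items ranked first)
dcg = 1 / np.log2(3) + 1 / np.log2(4)
idcg = 1 / np.log2(2) + 1 / np.log2(3)
print(mean_ndcg_score(y_true, y_score, k=3))  # dcg / idcg, approximately 0.693
print(dcg / idcg)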
mean_rprecision_k
Mean R-precision at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

Returns
-------
mean R-precision @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) # MASKED: mean_rprecision_k function (lines 83-103) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def mean_rprecision_k(y_true, y_score, k=10):
    """Mean R-precision at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).

    y_score : array-like, shape = [n_samples]
        Predicted scores.

    k : int
        Rank.

    Returns
    -------
    mean R-precision @k : float
    """
    p_ks = []
    for y_t, y_s in zip(y_true, y_score):
        if np.sum(y_t == 1):
            p_ks.append(ranking_rprecision_score(y_t, y_s, k=k))

    return np.mean(p_ks)
83
103
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
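A minimal usage sketch for the R-precision functions in this record, assuming the module above is importable as `metrics` (the module name and the toy labels/scores are assumptions for illustration):

import numpy as np
from metrics import ranking_rprecision_score, mean_rprecision_k

y_true = np.array([0, 1, 1, 0, 0, 1])
y_score = np.array([0.2, 0.9, 0.6, 0.3, 0.8, 0.1])

# Top-3 by score are indices 1, 4, 2 -> labels 1, 0, 1 -> 2 relevant documents.
# The denominator is min(k, n_pos) = min(3, 3) = 3, so the score is 2/3.
print(ranking_rprecision_score(y_true, y_score, k=3))

# The mean variant averages over queries that have at least one positive label.
print(mean_rprecision_k([y_true, y_true], [y_score, y_score], k=3))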
ranking_recall_score
Recall at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

Returns
-------
recall @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) # MASKED: ranking_recall_score function (lines 106-135) def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. 
return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. 
Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def ranking_recall_score(y_true, y_score, k=10):
    # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf
    """Recall at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).

    y_score : array-like, shape = [n_samples]
        Predicted scores.

    k : int
        Rank.

    Returns
    -------
    recall @k : float
    """
    unique_y = np.unique(y_true)

    if len(unique_y) == 1:
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")

    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)

    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)

    return float(n_relevant) / n_pos
106
135
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
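A minimal usage sketch for recall at k, assuming the module above is importable as `metrics` (the module name and the toy labels/scores are assumptions for illustration):

import numpy as np
from metrics import ranking_recall_score

y_true = np.array([0, 1, 1, 0, 0, 1])
y_score = np.array([0.2, 0.9, 0.6, 0.3, 0.8, 0.1])

# Only 1 of the 3 relevant documents appears in the top 2 by score,
# so recall@2 = 1/3.
print(ranking_recall_score(y_true, y_score, k=2))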
ranking_precision_score
Precision at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

Returns
-------
precision @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos # MASKED: ranking_precision_score function (lines 138-165) def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).

    y_score : array-like, shape = [n_samples]
        Predicted scores.

    k : int
        Rank.

    Returns
    -------
    precision @k : float
    """
    unique_y = np.unique(y_true)

    if len(unique_y) == 1:
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")

    pos_label = unique_y[1]
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)

    return float(n_relevant) / k
138
165
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
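A minimal usage sketch for precision at k, assuming the module above is importable as `metrics` (the module name and the toy labels/scores are assumptions for illustration):

import numpy as np
from metrics import ranking_precision_score

y_true = np.array([0, 1, 1, 0, 0, 1])
y_score = np.array([0.2, 0.9, 0.6, 0.3, 0.8, 0.1])

# The top 4 documents by score contain 2 relevant ones,
# so precision@4 = 2/4 = 0.5.
print(ranking_precision_score(y_true, y_score, k=4))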
ranking_rprecision_score
R-precision at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).

y_score : array-like, shape = [n_samples]
    Predicted scores.

k : int
    Rank.

Returns
-------
R-precision @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k # MASKED: ranking_rprecision_score function (lines 168-197) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def ranking_rprecision_score(y_true, y_score, k=10):
    """R-Precision at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    R-precision @k : float
    """
    unique_y = np.unique(y_true)

    if len(unique_y) == 1:
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")

    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)

    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])
    n_relevant = np.sum(y_true == pos_label)

    # Divide by min(n_pos, k) such that the best achievable score is always 1.0.
    return float(n_relevant) / min(k, n_pos)
168
197
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
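To make the difference between the masked function and plain precision@k concrete, here is a small sketch with made-up scores; it assumes ranking_precision_score and ranking_rprecision_score from the file above are in scope.

import numpy as np

# One toy query: two relevant documents out of six (numbers made up for illustration).
y_true = np.array([1, 0, 1, 0, 0, 0])
y_score = np.array([0.9, 0.8, 0.3, 0.7, 0.6, 0.1])

# Both relevant documents land in the top 5, but precision@5 divides by k,
# so it cannot exceed n_pos / k = 0.4 here.
print(ranking_precision_score(y_true, y_score, k=5))    # 0.4
# R-precision divides by min(k, n_pos), so a ranking that retrieves every
# relevant document within the top k scores 1.0.
print(ranking_rprecision_score(y_true, y_score, k=5))   # 1.0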
average_precision_score
Average precision at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
    Predicted scores.
k : int
    Rank.

Returns
-------
average precision @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) # MASKED: average_precision_score function (lines 200-242) def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. 
Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def average_precision_score(y_true, y_score, k=10):
    """Average precision at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.

    Returns
    -------
    average precision @k : float
    """
    unique_y = np.unique(y_true)

    if len(unique_y) == 1:
        raise ValueError("The score cannot be approximated.")
    elif len(unique_y) > 2:
        raise ValueError("Only supported for two relevance levels.")

    pos_label = unique_y[1]
    n_pos = np.sum(y_true == pos_label)

    order = np.argsort(y_score)[::-1][:min(n_pos, k)]
    y_true = np.asarray(y_true)[order]

    score = 0
    for i in range(len(y_true)):
        if y_true[i] == pos_label:
            # Compute precision up to document i,
            # i.e., the fraction of relevant documents up to document i.
            prec = 0
            for j in range(0, i + 1):
                if y_true[j] == pos_label:
                    prec += 1.0
            prec /= (i + 1.0)
            score += prec

    if n_pos == 0:
        return 0

    return score / n_pos
200
242
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
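A worked example of the average_precision_score variant above, which (unlike the standard definition over all ranks) only scores the top min(n_pos, k) positions; the inputs are made up and the function is assumed to be in scope.

import numpy as np

# Toy query: relevant documents at ranks 1 and 3 once sorted by score.
y_true = np.array([1, 0, 1, 0])
y_score = np.array([0.9, 0.8, 0.7, 0.1])

# n_pos = 2, so only the top min(n_pos, k) = 2 ranked documents are inspected.
# Rank 1 is relevant (precision 1/1); the second relevant document sits at
# rank 3 and is cut off, so the sum of precisions is 1.0 and AP@10 = 1.0 / 2 = 0.5.
print(average_precision_score(y_true, y_score, k=10))   # 0.5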
dcg_score
Discounted cumulative gain (DCG) at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
    Predicted scores.
k : int
    Rank.
gains : str
    Whether gains should be "exponential" (default) or "linear".

Returns
-------
DCG @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos # MASKED: dcg_score function (lines 245-273) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). 
ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def dcg_score(y_true, y_score, k=10, gains="exponential"):
    """Discounted cumulative gain (DCG) at rank k

    Parameters
    ----------
    y_true : array-like, shape = [n_samples]
        Ground truth (true relevance labels).
    y_score : array-like, shape = [n_samples]
        Predicted scores.
    k : int
        Rank.
    gains : str
        Whether gains should be "exponential" (default) or "linear".

    Returns
    -------
    DCG @k : float
    """
    order = np.argsort(y_score)[::-1]
    y_true = np.take(y_true, order[:k])

    if gains == "exponential":
        gains = 2 ** y_true - 1
    elif gains == "linear":
        gains = y_true
    else:
        raise ValueError("Invalid gains option.")

    # highest rank is 1 so +2 instead of +1
    discounts = np.log2(np.arange(len(y_true)) + 2)
    return np.sum(gains / discounts)
245
273
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
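To tie dcg_score and ndcg_score together, here is a short worked example with made-up graded relevance labels, assuming both functions from the file above are in scope.

import numpy as np

# Toy graded relevance: document 0 is highly relevant (2), document 2 mildly relevant (1).
y_true = np.array([2, 0, 1])
y_score = np.array([0.1, 0.9, 0.8])   # ranks the documents as 1, 2, 0

# Exponential gains: the ranked labels are [0, 1, 2], so
# DCG@3 = 0/log2(2) + 1/log2(3) + 3/log2(4) ≈ 0.631 + 1.5 = 2.131.
print(dcg_score(y_true, y_score, k=3))    # ≈ 2.131
# The ideal ordering [2, 1, 0] gives 3/log2(2) + 1/log2(3) + 0/log2(4) ≈ 3.631,
# so NDCG@3 ≈ 2.131 / 3.631 ≈ 0.587.
print(ndcg_score(y_true, y_score, k=3))   # ≈ 0.587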
ndcg_score
Normalized discounted cumulative gain (NDCG) at rank k

Parameters
----------
y_true : array-like, shape = [n_samples]
    Ground truth (true relevance labels).
y_score : array-like, shape = [n_samples]
    Predicted scores.
k : int
    Rank.
gains : str
    Whether gains should be "exponential" (default) or "linear".

Returns
-------
NDCG @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) # MASKED: ndcg_score function (lines 276-294) # Alternative API. def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. 
Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best
276
294
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
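A quick usage sketch for the ranking metrics in this row (toy labels and scores; the "metrics" module name in the import is an assumption, i.e. the file above saved as metrics.py). One caveat visible in the code: for inputs with a single relevance level, ranking_precision_score and its siblings return a ValueError object rather than raising it, which is why the mean_* helpers skip queries that contain no positive label.

# Minimal usage sketch (toy data; "metrics" is a hypothetical module name
# for the file shown above).
import numpy as np
from metrics import ndcg_score, mean_ndcg_score, mean_precision_k

y_true = np.array([1, 0, 0, 1, 0])             # binary relevance for one query
y_score = np.array([0.9, 0.8, 0.1, 0.7, 0.3])  # predicted scores

# NDCG@2 for one query: sort by score, keep the top 2, use gains 2**rel - 1
# and log2(rank + 1) discounts, then normalize by the ideal DCG.
print(ndcg_score(y_true, y_score, k=2))

# Corpus-level means iterate over queries and skip those with no positive label.
Y_true = [np.array([1, 0, 0, 1, 0]), np.array([0, 1, 0, 0, 0])]
Y_score = [np.array([0.9, 0.8, 0.1, 0.7, 0.3]), np.array([0.2, 0.1, 0.9, 0.4, 0.3])]
print(mean_ndcg_score(Y_true, Y_score, k=2))
print(mean_precision_k(Y_true, Y_score, k=2))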
dcg_from_ranking
Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
# MASKED: dcg_from_ranking function (lines 299-321) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts)
299
321
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
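The row above masks dcg_from_ranking, part of the file's alternative ranking-based API: instead of raw scores it takes document indices already in rank order, and ndcg_from_ranking normalizes by the ideal ranking truncated to the same length. A small illustration with toy graded labels, assuming the functions are in scope (e.g. appended to the same file):

# Minimal sketch of the ranking-based API (toy data, hypothetical usage).
import numpy as np

y_true = np.array([2, 1, 0, 0])   # graded relevance labels per document
ranking = [0, 2, 1]               # document indices in predicted rank order

dcg = dcg_from_ranking(y_true, ranking)    # gains 2**rel - 1, log2 discounts
ndcg = ndcg_from_ranking(y_true, ranking)  # divided by ideal DCG at k = len(ranking)
print(dcg, ndcg)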
ndcg_from_ranking
Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) # MASKED: ndcg_from_ranking function (lines 324-344) def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best
324
344
# https://github.com/iliaschalkidis/lmtc-eurlex57k/blob/master/metrics.py from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score import numpy as np def mean_precision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_precision_score(y_t, y_s, k=k)) return np.mean(p_ks) def mean_recall_k(y_true, y_score, k=10): """Mean recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean recall @k : float """ r_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): r_ks.append(ranking_recall_score(y_t, y_s, k=k)) return np.mean(r_ks) def mean_ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- Mean NDCG @k : float """ ndcg_s = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): ndcg_s.append(ndcg_score(y_t, y_s, k=k, gains=gains)) return np.mean(ndcg_s) def mean_rprecision_k(y_true, y_score, k=10): """Mean precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- mean precision @k : float """ p_ks = [] for y_t, y_s in zip(y_true, y_score): if np.sum(y_t == 1): p_ks.append(ranking_rprecision_score(y_t, y_s, k=k)) return np.mean(p_ks) def ranking_recall_score(y_true, y_score, k=10): # https://ils.unc.edu/courses/2013_spring/inls509_001/lectures/10-EvaluationMetrics.pdf """Recall at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / n_pos def ranking_precision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. 
Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) return float(n_relevant) / k def ranking_rprecision_score(y_true, y_score, k=10): """Precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) n_relevant = np.sum(y_true == pos_label) # Divide by min(n_pos, k) such that the best achievable score is always 1.0. return float(n_relevant) / min(k, n_pos) def average_precision_score(y_true, y_score, k=10): """Average precision at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. Returns ------- average precision @k : float """ unique_y = np.unique(y_true) if len(unique_y) == 1: return ValueError("The score cannot be approximated.") elif len(unique_y) > 2: raise ValueError("Only supported for two relevance levels.") pos_label = unique_y[1] n_pos = np.sum(y_true == pos_label) order = np.argsort(y_score)[::-1][:min(n_pos, k)] y_true = np.asarray(y_true)[order] score = 0 for i in range(len(y_true)): if y_true[i] == pos_label: # Compute precision up to document i # i.e, percentage of relevant documents up to document i. prec = 0 for j in range(0, i + 1): if y_true[j] == pos_label: prec += 1.0 prec /= (i + 1.0) score += prec if n_pos == 0: return 0 return score / n_pos def dcg_score(y_true, y_score, k=10, gains="exponential"): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- DCG @k : float """ order = np.argsort(y_score)[::-1] y_true = np.take(y_true, order[:k]) if gains == "exponential": gains = 2 ** y_true - 1 elif gains == "linear": gains = y_true else: raise ValueError("Invalid gains option.") # highest rank is 1 so +2 instead of +1 discounts = np.log2(np.arange(len(y_true)) + 2) return np.sum(gains / discounts) def ndcg_score(y_true, y_score, k=10, gains="exponential"): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). y_score : array-like, shape = [n_samples] Predicted scores. k : int Rank. gains : str Whether gains should be "exponential" (default) or "linear". Returns ------- NDCG @k : float """ best = dcg_score(y_true, y_true, k, gains) actual = dcg_score(y_true, y_score, k, gains) return actual / best # Alternative API. 
def dcg_from_ranking(y_true, ranking): """Discounted cumulative gain (DCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- DCG @k : float """ y_true = np.asarray(y_true) ranking = np.asarray(ranking) rel = y_true[ranking] gains = 2 ** rel - 1 discounts = np.log2(np.arange(len(ranking)) + 2) return np.sum(gains / discounts) def ndcg_from_ranking(y_true, ranking): """Normalized discounted cumulative gain (NDCG) at rank k Parameters ---------- y_true : array-like, shape = [n_samples] Ground truth (true relevance labels). ranking : array-like, shape = [k] Document indices, i.e., ranking[0] is the index of top-ranked document, ranking[1] is the index of second-ranked document, ... k : int Rank. Returns ------- NDCG @k : float """ k = len(ranking) best_ranking = np.argsort(y_true)[::-1] best = dcg_from_ranking(y_true, best_ranking[:k]) return dcg_from_ranking(y_true, ranking) / best def colwise_accuracy(y_true,y_pred): y_pred=y_pred.T y_true=y_true.T acc_list=[] for cate in range(0,y_pred.shape[0]): acc_list.append(accuracy_score(y_pred[cate],y_true[cate])) return sum(acc_list)/len(acc_list) def calculate_metrics(pred, target, threshold=0.5): pred = np.array(pred > threshold, dtype=float) return {'Accuracy': accuracy_score(y_true=target, y_pred=pred), 'Column-wise Accuracy': colwise_accuracy(y_true=target, y_pred=pred), 'micro/precision': precision_score(y_true=target, y_pred=pred, average='micro'), 'micro/recall': recall_score(y_true=target, y_pred=pred, average='micro'), 'micro/f1': f1_score(y_true=target, y_pred=pred, average='micro'), 'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'), 'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'), 'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'), 'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'), 'samples/recall': recall_score(y_true=target, y_pred=pred, average='samples'), 'samples/f1': f1_score(y_true=target, y_pred=pred, average='samples'), }
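Beyond the ranking metrics, this file ends with two multi-label classification helpers. A small sketch of calculate_metrics on toy arrays (hypothetical usage, assuming the function is in scope): it thresholds the predicted probabilities and reports exact-match accuracy, column-wise accuracy, and micro/macro/samples-averaged precision, recall and F1 via sklearn.

# Minimal sketch of the multi-label helpers (toy data, hypothetical usage).
import numpy as np

target = np.array([[1, 0, 1],
                   [0, 1, 0]])          # ground-truth multi-label matrix
pred = np.array([[0.9, 0.2, 0.6],
                 [0.1, 0.7, 0.4]])      # predicted probabilities

scores = calculate_metrics(pred, target, threshold=0.5)
# The thresholded predictions match the targets exactly here, so every value is 1.0.
print(scores['Accuracy'], scores['micro/f1'], scores['samples/f1'])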
fold_loaders
Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of the original data set. Args: fold: fold number to return Returns: (train data loader, test data loader)
import torch class KFold: def __init__(self, dataset, n_fold=10, batch_size=32, num_workers=0, pin_memory=False): self.fold = 0 self.batch_size = batch_size self.num_workers = num_workers self.pin_memory = pin_memory self.dataset = dataset self.n_fold = n_fold self.fold_size = len(self.dataset) // self.n_fold self.folded_size = self.n_fold * self.fold_size self.fold_idx = self.fold_split() def fold_split(self, random_seed=None): """ Splitting the folds. Args: random_seed: Random seed for reproducibility Returns: tensor containing indices for folds, where dim=0 is the fold number """ if random_seed is not None: torch.manual_seed(random_seed) fold_idx = torch.randperm(self.dataset.__len__()) fold_idx = fold_idx[:self.folded_size].view(-1, self.fold_size) return fold_idx # MASKED: fold_loaders function (lines 35-62)
def fold_loaders(self, fold=-1): """ Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of the original data set. Args: fold: fold number to return Returns: (train data loader, test data loader) """ if fold == -1: fold = self.fold test_fold_idx = self.fold_idx[fold] train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if i != fold]].view(-1) train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, # args.batch_size, num_workers=self.num_workers, # args.loader_num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx)) test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, # args.batch_size, num_workers=self.num_workers, # args.loader_num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx)) self.fold = (self.fold + 1) % self.n_fold return train_loader, test_loader
35
62
import torch class KFold: def __init__(self, dataset, n_fold=10, batch_size=32, num_workers=0, pin_memory=False): self.fold = 0 self.batch_size = batch_size self.num_workers = num_workers self.pin_memory = pin_memory self.dataset = dataset self.n_fold = n_fold self.fold_size = len(self.dataset) // self.n_fold self.folded_size = self.n_fold * self.fold_size self.fold_idx = self.fold_split() def fold_split(self, random_seed=None): """ Splitting the folds. Args: random_seed: Random seed for reproducibility Returns: tensor containing indices for folds, where dim=0 is the fold number """ if random_seed is not None: torch.manual_seed(random_seed) fold_idx = torch.randperm(self.dataset.__len__()) fold_idx = fold_idx[:self.folded_size].view(-1, self.fold_size) return fold_idx def fold_loaders(self, fold=-1): """ Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of the original data set. Args: fold: fold number to return Returns: (train data loader, test data loader) """ if fold == -1: fold = self.fold test_fold_idx = self.fold_idx[fold] train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if i != fold]].view(-1) train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, # args.batch_size, num_workers=self.num_workers, # args.loader_num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx)) test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, # args.batch_size, num_workers=self.num_workers, # args.loader_num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx)) self.fold = (self.fold + 1) % self.n_fold return train_loader, test_loader
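A short usage sketch for the KFold wrapper in this row (note this is the class defined above, not sklearn's KFold). The dataset and loop below are illustrative assumptions; fold_loaders takes an explicit fold index, or with the default -1 cycles through folds on successive calls.

# Minimal usage sketch (toy TensorDataset, hypothetical training loop).
import torch
from torch.utils.data import TensorDataset

X = torch.randn(100, 8)
y = torch.randint(0, 2, (100,))
dataset = TensorDataset(X, y)

kfold = KFold(dataset, n_fold=5, batch_size=16)  # KFold is the wrapper class above
for fold in range(kfold.n_fold):
    # One fold backs the test loader; the remaining folds back the train loader.
    # Both use SubsetRandomSampler over the precomputed fold indices.
    train_loader, test_loader = kfold.fold_loaders(fold)
    for xb, yb in train_loader:
        pass  # training step would go here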
__init__
" INPUT PARAMETER 1) num_visible: number of visible units in the RBM INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories INPUT PARAMETER 4) model_name: name of the model you wanna save the data INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional) INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional) INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional) INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional) INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional) INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss' INPUT PARAMETER 12) verbose: evel of verbosity. optional, default 0(for Regularization) INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary)
import tensorflow as tf import numpy as np import os import matplotlib.pyplot as plt from tqdm import tqdm class RBM(object): # MASKED: __init__ function (lines 9-72) def sample_prob(self,probs,rand): """ takes a tensor of probabilitiesas from a sigmoidal activation and sample from all the distributions. probs INPUT parameter: tensor of probabilities rand INPUT parameter :tensor (of same shape as probabilities) of random values :RETURN binary sample of probabilities """ return tf.nn.relu(tf.sign(probs-rand)) def gen_batches(self,data,batch_size): """ Divide input data into batches data INPUT parameter: input data( like a data frame) batch_size INPUT parameter: desired size of each batch :RETURN data divided in batches """ data = np.array(data) for i in range(0,data.shape[0],batch_size): yield data[i:i+batch_size] def fit(self,train_set,validation_set = None,restore_previous_model=False): """" fit the model to the training data INPUT PARAMETER train_set: training set INPUT PARAMETER validation set.default None (Hence Optional) INPUT PARAMETER restore_previous_model: if true, a previous trained model with the same name of this model is restored from disk to continue training. OUTPUT: self """ if validation_set is not None: self.validation_size = validation_set.shape[0] tf.reset_default_graph() self._build_model()# you will come across it later on with tf.Session() as self.tf_session: self._initialize_tf_utilities_and_ops(restore_previous_model) self._train_model(train_set, validation_set) self.tf_saver.save(self.tf_session, self.model_path) if self.plot_training_loss: #plot editing should be done here as you wish plt.plot(self.training_losses) plt.title("Training batch losses v.s. iteractions") plt.xlabel("Num of training iteractions") plt.ylabel("Reconstruction error") plt.show() def _initialize_tf_utilities_and_ops(self, restore_previous_model): """" Initialize TensorFlow operations: summaries, init operations, saver, summary_writer. Restore a previously trained model if the flag restore_previous_model is true. """ init_op = tf.global_variables_initializer() self.tf_saver = tf.train.Saver() self.tf_session.run(init_op) if restore_previous_model: self.tf_saver.restore(self.tf_session, self.model_path) def _train_model(self, train_set, validation_set): """" Train the Model INPUT PARAMETER train set: Training set INPUT PARAMETER validation_set: Validation set OUTPUT self """ for i in range(self.num_epochs): self._run_train_step(train_set) if validation_set is not None: self._run_validation_error(i, validation_set) def _run_train_step(self,train_set): """" Run a training step. A training step is made by randomly shuffling the training set, divide into batches and run the variable update nodes for each batch. If self.plot_training_loss is true, will record training loss after each batch. INPUT PARAMETER train_set: training set OUTPUT self """ np.random.shuffle(train_set) batches = [_ for _ in self.gen_batches(train_set, self.batch_size)] updates = [self.w_upd8, self.bh_upd8, self.bv_upd8] for batch in batches: if self.plot_training_loss: _,loss = self.tf_session.run([updates,self.loss_function],feed_dict = self._create_feed_dict(batch)) self.training_losses.append(loss) else: self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch)) def _run_validation_error(self, epoch, validation_set): """ Run the error computation on the validation set and print it out for each epoch. 
INPUT PARAMETER: current epoch INPUT PARAMETER validation_set: validation data OUTPUT: self """ loss = self.tf_session.run(self.loss_function, feed_dict=self._create_feed_dict(validation_set)) if self.verbose == 1: tqdm.write("Validation cost at step %s: %s" % (epoch, loss)) def _create_feed_dict(self, data): """ Create the dictionary of data to feed to TensorFlow's session during training. :param data: training/validation set batch :return: dictionary(self.input_data: data, self.hrand: random_uniform) """ return { self.input_data: data, self.hrand: np.random.rand(data.shape[0], self.num_hidden), } def _build_model(self): """ BUilding the Restriced Boltzman Machine in Tensorflow """ self.input_data, self.hrand = self._create_placeholders() #check the function below self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_ = self._create_variables()#check the function below hprobs0, hstates0, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(self.input_data) positive = self.compute_positive_association(self.input_data, hprobs0, hstates0) nn_input = vprobs for step in range(self.gibbs_sampling_steps - 1): hprobs, hstates, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(nn_input) nn_input = vprobs self.reconstruct = vprobs negative = tf.matmul(tf.transpose(vprobs), hprobs1) self.encode = hprobs1 #exact formula in my paper dw = positive - negative self.dw = self.momentum*self.dw + (1-self.momentum)*dw self.w_upd8 = self.W.assign_add(self.learning_rate*self.dw - self.learning_rate*self.l2*self.W) dbh_ = tf.reduce_mean(hprobs0 - hprobs1, 0) self.dbh_ = self.momentum*self.dbh_ + self.learning_rate*dbh_ self.bh_upd8 = self.bh_.assign_add(self.dbh_) dbv_ = tf.reduce_mean(self.input_data - vprobs, 0) self.dbv_ = self.momentum*self.dbv_ + self.learning_rate*dbv_ self.bv_upd8 = self.bv_.assign_add(self.dbv_) self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs))) self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs), 1)) self._create_free_energy_for_batch() def _create_free_energy_for_batch(self): """ Create free energy ops to batch input data :return: self """ if self.visible_unit_type == 'bin': self._create_free_energy_for_bin() elif self.visible_unit_type == 'gauss': self._create_free_energy_for_gauss() else: self.batch_free_energy = None def _create_free_energy_for_bin(self): """ Create free energy for mdoel with Bin visible layer :return: self """ #Refer to the Binary Free Energy Equation self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1])) def _create_free_energy_for_gauss(self): """ Create free energy for model with Gauss visible layer :return: self """ #Refer to the Gaussian Free Energy Equation self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) - tf.reshape(tf.reduce_sum(0.5 * self.input_data * self.input_data, 1), [-1, 1]) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1])) def _create_placeholders(self): """ Create the TensorFlow placeholders for the model. :return: tuple(input(shape(None, num_visible)), hrand(shape(None, num_hidden))) """ x = tf.placeholder('float', [None, self.num_visible], name='x-input') hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand') return x, hrand def _create_variables(self): """ Create the TensorFlow variables for the model. 
:return: tuple(weights(shape(num_visible, num_hidden), hidden bias(shape(num_hidden)), visible bias(shape(num_visible))) """ W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights') dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name = 'derivative-weights') bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias') dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias') bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias') dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias') return W, bh_, bv_, dw, dbh_, dbv_ def gibbs_sampling_step(self, visible): """ Performs one step of gibbs sampling. :param visible: activations of the visible units :return: tuple(hidden probs, hidden states, visible probs, new hidden probs, new hidden states) """ hprobs, hstates = self.sample_hidden_from_visible(visible) vprobs = self.sample_visible_from_hidden(hprobs) hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs) return hprobs, hstates, vprobs, hprobs1, hstates1 def sample_hidden_from_visible(self, visible): """ Sample the hidden units from the visible units. This is the Positive phase of the Contrastive Divergence algorithm. :param visible: activations of the visible units :return: tuple(hidden probabilities, hidden binary states) """ hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_) hstates = self.sample_prob(hprobs, self.hrand) return hprobs, hstates def sample_visible_from_hidden(self, hidden): """ Sample the visible units from the hidden units. This is the Negative phase of the Contrastive Divergence algorithm. :param hidden: activations of the hidden units :return: visible probabilities """ visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_ if self.visible_unit_type == 'bin': vprobs = tf.nn.sigmoid(visible_activation) elif self.visible_unit_type == 'gauss': vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev) else: vprobs = None return vprobs def compute_positive_association(self, visible, hidden_probs, hidden_states): """ Compute positive associations between visible and hidden units. :param visible: visible units :param hidden_probs: hidden units probabilities :param hidden_states: hidden units states :return: positive association = dot(visible.T, hidden) """ if self.visible_unit_type == 'bin': positive = tf.matmul(tf.transpose(visible), hidden_states) elif self.visible_unit_type == 'gauss': positive = tf.matmul(tf.transpose(visible), hidden_probs) else: positive = None return positive def _create_model_directory(self): """ Create the directory for storing the model :return: self """ if not os.path.isdir(self.main_dir): print("Created dir: ", self.main_dir) os.mkdir(self.main_dir) def getRecontructError(self, data): """ return Reconstruction Error (loss) from data in batch. :param data: input data of shape num_samples x visible_size :return: Reconstruction cost for each sample in the batch """ with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) batch_loss = self.tf_session.run(self.batch_cost, feed_dict=self._create_feed_dict(data)) return batch_loss def getFreeEnergy(self, data): """ return Free Energy from data. 
:param data: input data of shape num_samples x visible_size :return: Free Energy for each sample: p(x) """ with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) batch_FE = self.tf_session.run(self.batch_free_energy, feed_dict=self._create_feed_dict(data)) return batch_FE def getRecontruction(self, data): with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) batch_reconstruct = self.tf_session.run(self.recontruct, feed_dict=self._create_feed_dict(data)) return batch_reconstruct def load_model(self, shape, gibbs_sampling_steps, model_path): """ Load a trained model from disk. The shape of the model (num_visible, num_hidden) and the number of gibbs sampling steps must be known in order to restore the model. :param shape: tuple(num_visible, num_hidden) :param gibbs_sampling_steps: :param model_path: :return: self """ self.num_visible, self.num_hidden = shape[0], shape[1] self.gibbs_sampling_steps = gibbs_sampling_steps tf.reset_default_graph() self._build_model() init_op = tf.global_variables_initializer() self.tf_saver = tf.train.Saver() with tf.Session() as self.tf_session: self.tf_session.run(init_op) self.tf_saver.restore(self.tf_session, model_path) def get_model_parameters(self): """ Return the model parameters in the form of numpy arrays. :return: model parameters """ with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) return { 'W': self.W.eval(), 'bh_': self.bh_.eval(), 'bv_': self.bv_.eval() } #The MIT License (MIT) #Copyright (c) 2016 Gabriele Angeletti #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. #© 2019 GitHub, Inc.
def __init__(self,num_visible,num_hidden,visible_unit_type='bin',main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models', model_name='rbm_model',gibbs_sampling_steps=1,learning_rate=0.01,momentum=0.9,l2=0.001,batch_size=10, num_epochs=10,stddev=0.1,verbose=0,plot_training_loss=True): """" INPUT PARAMETER 1) num_visible: number of visible units in the RBM INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories INPUT PARAMETER 4) model_name: name of the model you wanna save the data INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional) INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional) INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional) INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional) INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional) INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss' INPUT PARAMETER 12) verbose: evel of verbosity. optional, default 0(for Regularization) INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary) """ #Defining main paramters self.num_visible = num_visible #1 self.num_hidden = num_hidden #2 self.main_dir = main_dir #3 self.model_name = model_name #4 self.gibbs_sampling_steps = gibbs_sampling_steps #5 self.learning_rate = learning_rate #6 self.momentum = momentum #7 self.l2 = l2 #8 self.batch_size = batch_size #9 self.num_epochs = num_epochs #10 self.stddev = stddev #11 self.verbose = verbose #12 self.plot_training_loss = plot_training_loss #13 self.visible_unit_type = visible_unit_type #14 self._create_model_directory() self.model_path = os.path.join(self.main_dir, self.model_name) self.W = None self.bh_ = None self.bv_ = None self.dw = None self.dbh_ = None self.dbv_ = None self.w_upd8 = None self.bh_upd8 = None self.bv_upd8 = None self.encode = None self.recontruct = None self.loss_function = None self.batch_cost = None self.batch_free_energy = None self.training_losses = [] self.input_data = None#_build_model self.hrand = None # _build_model self.validation_size = None #fit self.tf_session = None #fit self.tf_saver = None #_initialize_tf_utilities_and_ops
9
72
import tensorflow as tf import numpy as np import os import matplotlib.pyplot as plt from tqdm import tqdm class RBM(object): def __init__(self,num_visible,num_hidden,visible_unit_type='bin',main_dir='/Users/chamalgomes/Documents/Python/GitLab/DeepLearning/KAI PROJECT/rbm/models', model_name='rbm_model',gibbs_sampling_steps=1,learning_rate=0.01,momentum=0.9,l2=0.001,batch_size=10, num_epochs=10,stddev=0.1,verbose=0,plot_training_loss=True): """" INPUT PARAMETER 1) num_visible: number of visible units in the RBM INPUT PARAMETER 2) num_hidden: number of hidden units in the RBM INPUT PARAMETER 3) main_dir: main directory to put the models, data and summary directories INPUT PARAMETER 4) model_name: name of the model you wanna save the data INPUT PARAMETER 5) gibbs_sampling_steps: Default 1 (Hence Optional) INPUT PARAMETER 6) learning_rate: Default 0.01 (Hence Optional) INPUT PARAMETER 7) momentum: Default 0.9(Hence Optional) for Gradient Descent INPUT PARAMETER 8) l2: l2 regularization lambda value for weight decay Default 0.001(Hence Optional) INPUT PARAMETER 9) batch_size: Default 10 (Hence Optional) INPUT PARAMETER 10) num_epochs: Default 10 (Hence Optional) INPUT PARAMETER 11) stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss' INPUT PARAMETER 12) verbose: evel of verbosity. optional, default 0(for Regularization) INPUT PARAMETER 13) plot_training_loss: whether or not to plot training loss, default True INPUT PARAMETER 14) visible_units_type: Binary or Gaussian (Default Binary) """ #Defining main paramters self.num_visible = num_visible #1 self.num_hidden = num_hidden #2 self.main_dir = main_dir #3 self.model_name = model_name #4 self.gibbs_sampling_steps = gibbs_sampling_steps #5 self.learning_rate = learning_rate #6 self.momentum = momentum #7 self.l2 = l2 #8 self.batch_size = batch_size #9 self.num_epochs = num_epochs #10 self.stddev = stddev #11 self.verbose = verbose #12 self.plot_training_loss = plot_training_loss #13 self.visible_unit_type = visible_unit_type #14 self._create_model_directory() self.model_path = os.path.join(self.main_dir, self.model_name) self.W = None self.bh_ = None self.bv_ = None self.dw = None self.dbh_ = None self.dbv_ = None self.w_upd8 = None self.bh_upd8 = None self.bv_upd8 = None self.encode = None self.recontruct = None self.loss_function = None self.batch_cost = None self.batch_free_energy = None self.training_losses = [] self.input_data = None#_build_model self.hrand = None # _build_model self.validation_size = None #fit self.tf_session = None #fit self.tf_saver = None #_initialize_tf_utilities_and_ops def sample_prob(self,probs,rand): """ takes a tensor of probabilitiesas from a sigmoidal activation and sample from all the distributions. 
probs INPUT parameter: tensor of probabilities rand INPUT parameter :tensor (of same shape as probabilities) of random values :RETURN binary sample of probabilities """ return tf.nn.relu(tf.sign(probs-rand)) def gen_batches(self,data,batch_size): """ Divide input data into batches data INPUT parameter: input data( like a data frame) batch_size INPUT parameter: desired size of each batch :RETURN data divided in batches """ data = np.array(data) for i in range(0,data.shape[0],batch_size): yield data[i:i+batch_size] def fit(self,train_set,validation_set = None,restore_previous_model=False): """" fit the model to the training data INPUT PARAMETER train_set: training set INPUT PARAMETER validation set.default None (Hence Optional) INPUT PARAMETER restore_previous_model: if true, a previous trained model with the same name of this model is restored from disk to continue training. OUTPUT: self """ if validation_set is not None: self.validation_size = validation_set.shape[0] tf.reset_default_graph() self._build_model()# you will come across it later on with tf.Session() as self.tf_session: self._initialize_tf_utilities_and_ops(restore_previous_model) self._train_model(train_set, validation_set) self.tf_saver.save(self.tf_session, self.model_path) if self.plot_training_loss: #plot editing should be done here as you wish plt.plot(self.training_losses) plt.title("Training batch losses v.s. iteractions") plt.xlabel("Num of training iteractions") plt.ylabel("Reconstruction error") plt.show() def _initialize_tf_utilities_and_ops(self, restore_previous_model): """" Initialize TensorFlow operations: summaries, init operations, saver, summary_writer. Restore a previously trained model if the flag restore_previous_model is true. """ init_op = tf.global_variables_initializer() self.tf_saver = tf.train.Saver() self.tf_session.run(init_op) if restore_previous_model: self.tf_saver.restore(self.tf_session, self.model_path) def _train_model(self, train_set, validation_set): """" Train the Model INPUT PARAMETER train set: Training set INPUT PARAMETER validation_set: Validation set OUTPUT self """ for i in range(self.num_epochs): self._run_train_step(train_set) if validation_set is not None: self._run_validation_error(i, validation_set) def _run_train_step(self,train_set): """" Run a training step. A training step is made by randomly shuffling the training set, divide into batches and run the variable update nodes for each batch. If self.plot_training_loss is true, will record training loss after each batch. INPUT PARAMETER train_set: training set OUTPUT self """ np.random.shuffle(train_set) batches = [_ for _ in self.gen_batches(train_set, self.batch_size)] updates = [self.w_upd8, self.bh_upd8, self.bv_upd8] for batch in batches: if self.plot_training_loss: _,loss = self.tf_session.run([updates,self.loss_function],feed_dict = self._create_feed_dict(batch)) self.training_losses.append(loss) else: self.tf_session.run(updates, feed_dict=self._create_feed_dict(batch)) def _run_validation_error(self, epoch, validation_set): """ Run the error computation on the validation set and print it out for each epoch. INPUT PARAMETER: current epoch INPUT PARAMETER validation_set: validation data OUTPUT: self """ loss = self.tf_session.run(self.loss_function, feed_dict=self._create_feed_dict(validation_set)) if self.verbose == 1: tqdm.write("Validation cost at step %s: %s" % (epoch, loss)) def _create_feed_dict(self, data): """ Create the dictionary of data to feed to TensorFlow's session during training. 
:param data: training/validation set batch :return: dictionary(self.input_data: data, self.hrand: random_uniform) """ return { self.input_data: data, self.hrand: np.random.rand(data.shape[0], self.num_hidden), } def _build_model(self): """ BUilding the Restriced Boltzman Machine in Tensorflow """ self.input_data, self.hrand = self._create_placeholders() #check the function below self.W, self.bh_, self.bv_, self.dw, self.dbh_, self.dbv_ = self._create_variables()#check the function below hprobs0, hstates0, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(self.input_data) positive = self.compute_positive_association(self.input_data, hprobs0, hstates0) nn_input = vprobs for step in range(self.gibbs_sampling_steps - 1): hprobs, hstates, vprobs, hprobs1, hstates1 = self.gibbs_sampling_step(nn_input) nn_input = vprobs self.reconstruct = vprobs negative = tf.matmul(tf.transpose(vprobs), hprobs1) self.encode = hprobs1 #exact formula in my paper dw = positive - negative self.dw = self.momentum*self.dw + (1-self.momentum)*dw self.w_upd8 = self.W.assign_add(self.learning_rate*self.dw - self.learning_rate*self.l2*self.W) dbh_ = tf.reduce_mean(hprobs0 - hprobs1, 0) self.dbh_ = self.momentum*self.dbh_ + self.learning_rate*dbh_ self.bh_upd8 = self.bh_.assign_add(self.dbh_) dbv_ = tf.reduce_mean(self.input_data - vprobs, 0) self.dbv_ = self.momentum*self.dbv_ + self.learning_rate*dbv_ self.bv_upd8 = self.bv_.assign_add(self.dbv_) self.loss_function = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs))) self.batch_cost = tf.sqrt(tf.reduce_mean(tf.square(self.input_data - vprobs), 1)) self._create_free_energy_for_batch() def _create_free_energy_for_batch(self): """ Create free energy ops to batch input data :return: self """ if self.visible_unit_type == 'bin': self._create_free_energy_for_bin() elif self.visible_unit_type == 'gauss': self._create_free_energy_for_gauss() else: self.batch_free_energy = None def _create_free_energy_for_bin(self): """ Create free energy for mdoel with Bin visible layer :return: self """ #Refer to the Binary Free Energy Equation self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1])) def _create_free_energy_for_gauss(self): """ Create free energy for model with Gauss visible layer :return: self """ #Refer to the Gaussian Free Energy Equation self.batch_free_energy = - (tf.matmul(self.input_data, tf.reshape(self.bv_, [-1, 1])) - tf.reshape(tf.reduce_sum(0.5 * self.input_data * self.input_data, 1), [-1, 1]) + tf.reshape(tf.reduce_sum(tf.log(tf.exp(tf.matmul(self.input_data, self.W) + self.bh_) + 1), 1), [-1, 1])) def _create_placeholders(self): """ Create the TensorFlow placeholders for the model. :return: tuple(input(shape(None, num_visible)), hrand(shape(None, num_hidden))) """ x = tf.placeholder('float', [None, self.num_visible], name='x-input') hrand = tf.placeholder('float', [None, self.num_hidden], name='hrand') return x, hrand def _create_variables(self): """ Create the TensorFlow variables for the model. 
:return: tuple(weights(shape(num_visible, num_hidden), hidden bias(shape(num_hidden)), visible bias(shape(num_visible))) """ W = tf.Variable(tf.random_normal((self.num_visible, self.num_hidden), mean=0.0, stddev=0.01), name='weights') dw = tf.Variable(tf.zeros([self.num_visible, self.num_hidden]), name = 'derivative-weights') bh_ = tf.Variable(tf.zeros([self.num_hidden]), name='hidden-bias') dbh_ = tf.Variable(tf.zeros([self.num_hidden]), name='derivative-hidden-bias') bv_ = tf.Variable(tf.zeros([self.num_visible]), name='visible-bias') dbv_ = tf.Variable(tf.zeros([self.num_visible]), name='derivative-visible-bias') return W, bh_, bv_, dw, dbh_, dbv_ def gibbs_sampling_step(self, visible): """ Performs one step of gibbs sampling. :param visible: activations of the visible units :return: tuple(hidden probs, hidden states, visible probs, new hidden probs, new hidden states) """ hprobs, hstates = self.sample_hidden_from_visible(visible) vprobs = self.sample_visible_from_hidden(hprobs) hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs) return hprobs, hstates, vprobs, hprobs1, hstates1 def sample_hidden_from_visible(self, visible): """ Sample the hidden units from the visible units. This is the Positive phase of the Contrastive Divergence algorithm. :param visible: activations of the visible units :return: tuple(hidden probabilities, hidden binary states) """ hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_) hstates = self.sample_prob(hprobs, self.hrand) return hprobs, hstates def sample_visible_from_hidden(self, hidden): """ Sample the visible units from the hidden units. This is the Negative phase of the Contrastive Divergence algorithm. :param hidden: activations of the hidden units :return: visible probabilities """ visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_ if self.visible_unit_type == 'bin': vprobs = tf.nn.sigmoid(visible_activation) elif self.visible_unit_type == 'gauss': vprobs = tf.truncated_normal((1, self.num_visible), mean=visible_activation, stddev=self.stddev) else: vprobs = None return vprobs def compute_positive_association(self, visible, hidden_probs, hidden_states): """ Compute positive associations between visible and hidden units. :param visible: visible units :param hidden_probs: hidden units probabilities :param hidden_states: hidden units states :return: positive association = dot(visible.T, hidden) """ if self.visible_unit_type == 'bin': positive = tf.matmul(tf.transpose(visible), hidden_states) elif self.visible_unit_type == 'gauss': positive = tf.matmul(tf.transpose(visible), hidden_probs) else: positive = None return positive def _create_model_directory(self): """ Create the directory for storing the model :return: self """ if not os.path.isdir(self.main_dir): print("Created dir: ", self.main_dir) os.mkdir(self.main_dir) def getRecontructError(self, data): """ return Reconstruction Error (loss) from data in batch. :param data: input data of shape num_samples x visible_size :return: Reconstruction cost for each sample in the batch """ with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) batch_loss = self.tf_session.run(self.batch_cost, feed_dict=self._create_feed_dict(data)) return batch_loss def getFreeEnergy(self, data): """ return Free Energy from data. 
:param data: input data of shape num_samples x visible_size :return: Free Energy for each sample: p(x) """ with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) batch_FE = self.tf_session.run(self.batch_free_energy, feed_dict=self._create_feed_dict(data)) return batch_FE def getRecontruction(self, data): with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) batch_reconstruct = self.tf_session.run(self.recontruct, feed_dict=self._create_feed_dict(data)) return batch_reconstruct def load_model(self, shape, gibbs_sampling_steps, model_path): """ Load a trained model from disk. The shape of the model (num_visible, num_hidden) and the number of gibbs sampling steps must be known in order to restore the model. :param shape: tuple(num_visible, num_hidden) :param gibbs_sampling_steps: :param model_path: :return: self """ self.num_visible, self.num_hidden = shape[0], shape[1] self.gibbs_sampling_steps = gibbs_sampling_steps tf.reset_default_graph() self._build_model() init_op = tf.global_variables_initializer() self.tf_saver = tf.train.Saver() with tf.Session() as self.tf_session: self.tf_session.run(init_op) self.tf_saver.restore(self.tf_session, model_path) def get_model_parameters(self): """ Return the model parameters in the form of numpy arrays. :return: model parameters """ with tf.Session() as self.tf_session: self.tf_saver.restore(self.tf_session, self.model_path) return { 'W': self.W.eval(), 'bh_': self.bh_.eval(), 'bv_': self.bv_.eval() } #The MIT License (MIT) #Copyright (c) 2016 Gabriele Angeletti #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. #© 2019 GitHub, Inc.
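The RBM record above implements CD-k training with momentum and L2 weight decay, and exposes per-sample reconstruction error and free energy; for the binary case, `_create_free_energy_for_bin` computes the standard form F(v) = -b_v^\top v - \sum_j \log(1 + e^{(vW + b_h)_j}). Below is a minimal usage sketch, assuming the `RBM` class from this record is importable in a TensorFlow 1.x (graph-mode) environment; the data, directory name, and hyperparameter values are illustrative only.

import numpy as np
# RBM as defined in the record above; requires TensorFlow 1.x (tf.placeholder / tf.Session).

train = (np.random.rand(500, 64) > 0.5).astype(np.float32)   # synthetic binary data
valid = (np.random.rand(100, 64) > 0.5).astype(np.float32)

rbm = RBM(num_visible=64, num_hidden=32,
          visible_unit_type='bin',
          main_dir='models',             # assumed local directory for checkpoints
          model_name='rbm_demo',
          gibbs_sampling_steps=1,        # CD-1
          learning_rate=0.01, batch_size=10, num_epochs=5,
          verbose=1, plot_training_loss=False)

rbm.fit(train, validation_set=valid)        # builds the graph, trains, saves a checkpoint
recon_err = rbm.getRecontructError(valid)   # per-sample reconstruction RMSE
free_en = rbm.getFreeEnergy(valid)          # per-sample free energy
params = rbm.get_model_parameters()         # {'W': ..., 'bh_': ..., 'bv_': ...}

One caveat visible in the code itself: `_build_model` assigns `self.reconstruct`, while `__init__` and `getRecontruction` use `self.recontruct`, so retrieving reconstructions would fail as written; the other getters restore the saved checkpoint before evaluating and work as shown.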
__init__
Initializes the AVL Node. Args: key (optional): key of the (key, value) pair stored in the node. Defaults to None. value (optional): value associated with the key. Defaults to None.
class NoNodeData(Exception): pass class AVLNode(object): # MASKED: __init__ function (lines 7-18) def __str__(self) -> str: """Prints single AVL Node to stdout Raises: NoNodeData: If no data is present in the node Returns: str: output string """ if self.key: out = "data: {0}\nleft: {1}\nright: {2}\n".format( (self.key, self.value), self.left.__str__(), self.right.__str__()) return out raise NoNodeData def get_key(self) -> str: """returns the key of the node Returns: str: the key in (key, value) pair """ return self.key def get_value(self) -> str: """returns the value of the key Returns: str: the value in (key, value) pair """ return self.value
def __init__(self, key=None, value=None) -> None: """Initializes the AVL Node. Args: data (dict, optional): {Key:Value} pair. Defaults to None. """ super().__init__() self.key = key self.value = value self.left = None self.right = None self.height = 1
7
18
class NoNodeData(Exception): pass class AVLNode(object): def __init__(self, key=None, value=None) -> None: """Initializes the AVL Node. Args: data (dict, optional): {Key:Value} pair. Defaults to None. """ super().__init__() self.key = key self.value = value self.left = None self.right = None self.height = 1 def __str__(self) -> str: """Prints single AVL Node to stdout Raises: NoNodeData: If no data is present in the node Returns: str: output string """ if self.key: out = "data: {0}\nleft: {1}\nright: {2}\n".format( (self.key, self.value), self.left.__str__(), self.right.__str__()) return out raise NoNodeData def get_key(self) -> str: """returns the key of the node Returns: str: the key in (key, value) pair """ return self.key def get_value(self) -> str: """returns the value of the key Returns: str: the value in (key, value) pair """ return self.value
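A brief usage sketch for the `AVLNode` constructor above, assuming the class is defined in the current session. Note that although the original docstring mentions a `data` dict, the constructor takes separate `key` and `value` arguments.

# Build two nodes and link them by hand; a full AVL tree would also
# rebalance and maintain the `height` field on insertion.
root = AVLNode(key=10, value="ten")
root.left = AVLNode(key=5, value="five")

print(root.get_key(), root.get_value())   # 10 ten
print(root.height, root.left.height)      # 1 1 (heights are not updated automatically)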
__str__
Builds the string representation of a single AVL Node (used when the node is printed). Raises: NoNodeData: If no data is present in the node Returns: str: output string
class NoNodeData(Exception): pass class AVLNode(object): def __init__(self, key=None, value=None) -> None: """Initializes the AVL Node. Args: data (dict, optional): {Key:Value} pair. Defaults to None. """ super().__init__() self.key = key self.value = value self.left = None self.right = None self.height = 1 # MASKED: __str__ function (lines 20-33) def get_key(self) -> str: """returns the key of the node Returns: str: the key in (key, value) pair """ return self.key def get_value(self) -> str: """returns the value of the key Returns: str: the value in (key, value) pair """ return self.value
def __str__(self) -> str: """Prints single AVL Node to stdout Raises: NoNodeData: If no data is present in the node Returns: str: output string """ if self.key: out = "data: {0}\nleft: {1}\nright: {2}\n".format( (self.key, self.value), self.left.__str__(), self.right.__str__()) return out raise NoNodeData
20
33
class NoNodeData(Exception): pass class AVLNode(object): def __init__(self, key=None, value=None) -> None: """Initializes the AVL Node. Args: data (dict, optional): {Key:Value} pair. Defaults to None. """ super().__init__() self.key = key self.value = value self.left = None self.right = None self.height = 1 def __str__(self) -> str: """Prints single AVL Node to stdout Raises: NoNodeData: If no data is present in the node Returns: str: output string """ if self.key: out = "data: {0}\nleft: {1}\nright: {2}\n".format( (self.key, self.value), self.left.__str__(), self.right.__str__()) return out raise NoNodeData def get_key(self) -> str: """returns the key of the node Returns: str: the key in (key, value) pair """ return self.key def get_value(self) -> str: """returns the value of the key Returns: str: the value in (key, value) pair """ return self.value
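A short sketch of how `__str__` behaves, assuming the `AVLNode` and `NoNodeData` definitions above are available; it shows the recursive formatting and the guard on `self.key`.

node = AVLNode(key=3, value="c")
node.right = AVLNode(key=4, value="d")
print(node)            # data: (3, 'c') / left: None / right: data: (4, 'd') ...

empty = AVLNode()      # key is None, so formatting raises
try:
    str(empty)
except NoNodeData:
    print("node has no data")

Because the guard is `if self.key:` rather than `if self.key is not None:`, a node whose key is 0 or an empty string also raises NoNodeData; that is a consequence of the truthiness check in the code above, not an added behaviour.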
__init__
Daniel: careful about RAM usage. See: https://github.com/BerkeleyAutomation/baselines-fork/issues/9 For this we can assume that in the replay buffer, the teacher samples come first, and are fixed ahead of time, so our 'starting' index for adding into the replay buffer should be offset by this quantity.
"""Similar to DDPG except we only need obs and act, not the reward, etc. """ import numpy as np class RingBuffer(object): def __init__(self, maxlen, shape, dtype='float32'): self.maxlen = maxlen self.start = 0 self.length = 0 if dtype == 'uint8': # Daniel: special case with our XP replay. Force memory allocation # right away by the += 0 op, to check that system has enough RAM. # Might not be good for speed so we'll have to time it. self.data = np.zeros((maxlen,) + shape, dtype=np.uint8) print("Allocating data of size {} ...".format(self.data.shape)) self.data += 0 else: self.data = np.zeros((maxlen,) + shape).astype(dtype) # Daniel: avoid over-writing teacher samples. self.teach_idx = 0 def __len__(self): return self.length def __getitem__(self, idx): # Daniel: we shouldn't be calling this if it's using our DDPG/IMIT. assert self.teach_idx == 0, \ 'Something went wrong, why are we calling this method?' if idx < 0 or idx >= self.length: raise KeyError() return self.data[(self.start + idx) % self.maxlen] def get_batch(self, idxs): #return self.data[(self.start + idxs) % self.maxlen] # Daniel: seems like it's just fine to do this. It's the responsibility # of the caller to call a valid set of indices. And we do that with # randint in the memory class later. Here we avoid headaches with # `self.start` because I restrict it to be at least the teach_idx. return self.data[idxs] def append(self, v, is_teacher=False): if self.length < self.maxlen: # We have space, simply increase the length. self.length += 1 if is_teacher: self.teach_idx += 1 elif self.length == self.maxlen: # No space, "remove" the first item. #self.start = (self.start + 1) % self.maxlen self.start = max(self.teach_idx, (self.start + 1) % self.maxlen) else: # This should never happen. raise RuntimeError() self.data[(self.start + self.length - 1) % self.maxlen] = v def array_min2d(x): x = np.array(x) if x.ndim >= 2: return x return x.reshape(-1, 1) class Memory(object): # MASKED: __init__ function (lines 68-85) def sample(self, batch_size): # Draw such that we always have a proceeding element. # TODO(Daniel): the -2 doesn't make sense, we don't need a proceeding # element because the next observation is in a separate ring buffer?? I # think it should be nb_entries, so we are in practice not sampling the # last two items in this replay buffer. I'm switching to -1, should do # 0 later if I'm confident we're not ignoring anything else ... if self.do_valid: # If we're doing validation, which should NOT normally be true, # ignore the first few items, which we assign to be in validation. batch_idxs = np.random.randint(self.nb_valid_items, self.nb_entries-1, size=batch_size) else: batch_idxs = np.random.randint(self.nb_entries-1, size=batch_size) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) # Assume `x < self.nb_teach` (not equality!) is a teacher sample. flag_teacher = (batch_idxs < self.nb_teach).astype(np.float32) result = { 'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch), 'flag_teacher': array_min2d(flag_teacher), } return result def append(self, obs0, action, is_teacher=False, training=True): """Keep separate copies of obs0, obs1. So it's not memory efficient. 
""" if not training: return if is_teacher: assert not self.done_adding_teach, self.nb_teach assert self.nb_teach < self.limit, self.nb_teach self.nb_teach += 1 self.observations0.append(obs0, is_teacher) self.actions.append(action, is_teacher) def set_teacher_idx(self): """Call from IMIT so we do not over-write teacher data. """ self.done_adding_teach = True def set_valid_idx(self): """Set the validation index. """ assert self.done_adding_teach self.nb_valid_items = int(self.valid_frac * self.nb_entries) @property def nb_entries(self): return len(self.observations0) @property def nb_teach_entries(self): return self.nb_teach @property def nb_valid(self): return self.nb_valid_items def get_valid_obs(self, s_idx, e_idx): """Get a validation minibatch with fixed starting and ending indices. """ assert self.do_valid batch_idxs = np.arange(s_idx, e_idx) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) result = { 'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch), } return result
def __init__(self, limit, action_shape, observation_shape, dtype='float32', do_valid=False): """Daniel: careful about RAM usage. See: https://github.com/BerkeleyAutomation/baselines-fork/issues/9 For this we can assume that in the replay buffer, the teacher samples come first, and are fixed ahead of time, so our 'starting' index for adding into the replay buffer should be offset by this quantity. """ self.limit = limit self.do_valid = do_valid if self.do_valid: self.valid_frac = 0.2 self.nb_valid_items = 0 # will adjust later self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype) self.actions = RingBuffer(limit, shape=action_shape) self.nb_teach = 0 self.done_adding_teach = False
68
85
"""Similar to DDPG except we only need obs and act, not the reward, etc. """ import numpy as np class RingBuffer(object): def __init__(self, maxlen, shape, dtype='float32'): self.maxlen = maxlen self.start = 0 self.length = 0 if dtype == 'uint8': # Daniel: special case with our XP replay. Force memory allocation # right away by the += 0 op, to check that system has enough RAM. # Might not be good for speed so we'll have to time it. self.data = np.zeros((maxlen,) + shape, dtype=np.uint8) print("Allocating data of size {} ...".format(self.data.shape)) self.data += 0 else: self.data = np.zeros((maxlen,) + shape).astype(dtype) # Daniel: avoid over-writing teacher samples. self.teach_idx = 0 def __len__(self): return self.length def __getitem__(self, idx): # Daniel: we shouldn't be calling this if it's using our DDPG/IMIT. assert self.teach_idx == 0, \ 'Something went wrong, why are we calling this method?' if idx < 0 or idx >= self.length: raise KeyError() return self.data[(self.start + idx) % self.maxlen] def get_batch(self, idxs): #return self.data[(self.start + idxs) % self.maxlen] # Daniel: seems like it's just fine to do this. It's the responsibility # of the caller to call a valid set of indices. And we do that with # randint in the memory class later. Here we avoid headaches with # `self.start` because I restrict it to be at least the teach_idx. return self.data[idxs] def append(self, v, is_teacher=False): if self.length < self.maxlen: # We have space, simply increase the length. self.length += 1 if is_teacher: self.teach_idx += 1 elif self.length == self.maxlen: # No space, "remove" the first item. #self.start = (self.start + 1) % self.maxlen self.start = max(self.teach_idx, (self.start + 1) % self.maxlen) else: # This should never happen. raise RuntimeError() self.data[(self.start + self.length - 1) % self.maxlen] = v def array_min2d(x): x = np.array(x) if x.ndim >= 2: return x return x.reshape(-1, 1) class Memory(object): def __init__(self, limit, action_shape, observation_shape, dtype='float32', do_valid=False): """Daniel: careful about RAM usage. See: https://github.com/BerkeleyAutomation/baselines-fork/issues/9 For this we can assume that in the replay buffer, the teacher samples come first, and are fixed ahead of time, so our 'starting' index for adding into the replay buffer should be offset by this quantity. """ self.limit = limit self.do_valid = do_valid if self.do_valid: self.valid_frac = 0.2 self.nb_valid_items = 0 # will adjust later self.observations0 = RingBuffer(limit, shape=observation_shape, dtype=dtype) self.actions = RingBuffer(limit, shape=action_shape) self.nb_teach = 0 self.done_adding_teach = False def sample(self, batch_size): # Draw such that we always have a proceeding element. # TODO(Daniel): the -2 doesn't make sense, we don't need a proceeding # element because the next observation is in a separate ring buffer?? I # think it should be nb_entries, so we are in practice not sampling the # last two items in this replay buffer. I'm switching to -1, should do # 0 later if I'm confident we're not ignoring anything else ... if self.do_valid: # If we're doing validation, which should NOT normally be true, # ignore the first few items, which we assign to be in validation. 
batch_idxs = np.random.randint(self.nb_valid_items, self.nb_entries-1, size=batch_size) else: batch_idxs = np.random.randint(self.nb_entries-1, size=batch_size) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) # Assume `x < self.nb_teach` (not equality!) is a teacher sample. flag_teacher = (batch_idxs < self.nb_teach).astype(np.float32) result = { 'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch), 'flag_teacher': array_min2d(flag_teacher), } return result def append(self, obs0, action, is_teacher=False, training=True): """Keep separate copies of obs0, obs1. So it's not memory efficient. """ if not training: return if is_teacher: assert not self.done_adding_teach, self.nb_teach assert self.nb_teach < self.limit, self.nb_teach self.nb_teach += 1 self.observations0.append(obs0, is_teacher) self.actions.append(action, is_teacher) def set_teacher_idx(self): """Call from IMIT so we do not over-write teacher data. """ self.done_adding_teach = True def set_valid_idx(self): """Set the validation index. """ assert self.done_adding_teach self.nb_valid_items = int(self.valid_frac * self.nb_entries) @property def nb_entries(self): return len(self.observations0) @property def nb_teach_entries(self): return self.nb_teach @property def nb_valid(self): return self.nb_valid_items def get_valid_obs(self, s_idx, e_idx): """Get a validation minibatch with fixed starting and ending indices. """ assert self.do_valid batch_idxs = np.arange(s_idx, e_idx) obs0_batch = self.observations0.get_batch(batch_idxs) action_batch = self.actions.get_batch(batch_idxs) result = { 'obs0': array_min2d(obs0_batch), 'actions': array_min2d(action_batch), } return result
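A minimal sketch of the intended call sequence for the `Memory` buffer above (teacher demonstrations first, then learner experience), assuming the `Memory` and `RingBuffer` classes from this record are importable; shapes and counts are illustrative.

import numpy as np

mem = Memory(limit=1000, action_shape=(4,), observation_shape=(8,))

for _ in range(50):                                   # teacher samples go in first
    mem.append(np.random.randn(8), np.random.randn(4), is_teacher=True)
mem.set_teacher_idx()                                 # freeze the teacher region

for _ in range(200):                                  # learner experience afterwards
    mem.append(np.random.randn(8), np.random.randn(4))

batch = mem.sample(batch_size=32)
print(batch['obs0'].shape,          # (32, 8)
      batch['actions'].shape,       # (32, 4)
      batch['flag_teacher'].shape)  # (32, 1); 1.0 marks indices below nb_teach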
date_to_decimal_year
Convert a date to decimal year. Args: date (str): Date as a string. format (str, optional): Format of the date if a conversion is needed. Returns: float: Decimal year corresponding to the date.
import datetime import os import shutil import subprocess import urllib.request from contextlib import closing import numpy as np import pandas as pd import requests import wbml.out __all__ = [ "DependencyError", "resource", "dependency", "asserted_dependency", "split_df", "data_path", "date_to_decimal_year", ] class DependencyError(AssertionError): """Exception raised in case of an erroneous dependency.""" def resource(target, url, post=False, **kw_args): """Specify a dependency on an online resource. Further takes in keyword arguments that are passed to the appropriate method from :mod:`requests` or :mod:`urllib`. Args: target (str): Target file. url (str): Source URL. post (bool, optional): Make a POST request instead of a GET request. Only applicable if the URL starts with "http" or "https". Defaults to `False`. """ if not os.path.exists(target): with wbml.out.Section("Downloading file"): wbml.out.kv("Source", url) wbml.out.kv("Target", target) # Ensure that all directories in the path exist. make_dirs(target) # If the URL starts with "ftp", use the :mod:`urllib` library. if url.startswith("ftp"): with closing(urllib.request.urlopen(url, **kw_args)) as r: with open(target, "wb") as f: shutil.copyfileobj(r, f) # By default, use the :mod:`requests` library. else: request = requests.post if post else requests.get with request(url, stream=True, **kw_args) as r: with open(target, "wb") as f: shutil.copyfileobj(r.raw, f) def dependency(target, source, commands): """Specify a dependency that is generated from an existing file. Args: target (str): Target file. source (str): Source file. commands (list[str]): List of commands to generate target file. """ if not os.path.exists(target): with wbml.out.Section("Generating file"): wbml.out.kv("Source", source) wbml.out.kv("Target", target) # Check that the source exists. if not os.path.exists(source): raise DependencyError( f'Source "{source}" asserted to exist, but it does not.' ) # Save current working directory. current_wd = os.getcwd() # Ensure that all directories in the path exist. make_dirs(target) # Perform commands. for command in commands: wbml.out.out(command) # Change working directory to directory of target file, run # command, and restore working directory afterwards. os.chdir(os.path.dirname(target)) subprocess.call(command, shell=True) os.chdir(current_wd) def asserted_dependency(target): """Specify a dependency that cannot be fetched. Args: target (str): Target file. """ if not os.path.exists(target): raise DependencyError( f'Dependency "{target}" is asserted to exist, ' f"but it does not, and it cannot be " f"automatically fetched. Please put the file " f"into place manually." ) def make_dirs(path): """Make the directories in the path of a file. Args: path (url): Path of a file. """ os.makedirs(os.path.dirname(path), exist_ok=True) def data_path(*xs): """Get the path of a data file. Args: *xs (str): Parts of the path. Returns: str: Absolute path. """ return os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "data", *xs) ) def split_df(df, index_range, columns, iloc=False): """Split a data frame by selecting from columns a particular range. Args: df (:class:`pd.DataFrame`): Data frame to split. index_range (tuple): Tuple containing lower and upper limit of the range to split the index by. If `index_range = (a, b)`, then `[a, b)` is taken. columns (list[object]): Columns to select. iloc (bool, optional): The index range is the integer location instead of the index value. Defaults to `False`. 
Returns: tuple[:class:`pd.DataFrame`]: Selected rows from selected columns and the remainder. """ if iloc: inds = np.arange(df.shape[0]) rows = (inds >= index_range[0]) & (inds < index_range[1]) else: rows = (df.index >= index_range[0]) & (df.index < index_range[1]) selected = pd.DataFrame([df[name][rows] for name in columns]).T remainder = pd.DataFrame( [df[name][~rows] for name in columns] + [df[name] for name in set(df.columns) - set(columns)] ).T # Fix order of columns. selected_inds = [i for i, c in enumerate(df.columns) if c in columns] selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1) remainder = remainder.reindex(df.columns, axis=1) return selected, remainder # MASKED: date_to_decimal_year function (lines 173-197)
def date_to_decimal_year(date, format=None): """Convert a date to decimal year. Args: date (str): Date as a string. format (str, optional): Format of the date if a conversion is needed. Returns: float: Decimal year corresponding to the date. """ if format: date = datetime.datetime.strptime(date, format) start = datetime.date(date.year, 1, 1).toordinal() year_length = datetime.date(date.year + 1, 1, 1).toordinal() - start # Account for subday time. subday_time = 0 if hasattr(date, "hour"): subday_time += date.hour / year_length / 24 if hasattr(date, "minute"): subday_time += date.minute / year_length / 24 / 60 if hasattr(date, "second"): subday_time += date.second / year_length / 24 / 60 / 60 return date.year + float(date.toordinal() - start) / year_length + subday_time
173
197
import datetime import os import shutil import subprocess import urllib.request from contextlib import closing import numpy as np import pandas as pd import requests import wbml.out __all__ = [ "DependencyError", "resource", "dependency", "asserted_dependency", "split_df", "data_path", "date_to_decimal_year", ] class DependencyError(AssertionError): """Exception raised in case of an erroneous dependency.""" def resource(target, url, post=False, **kw_args): """Specify a dependency on an online resource. Further takes in keyword arguments that are passed to the appropriate method from :mod:`requests` or :mod:`urllib`. Args: target (str): Target file. url (str): Source URL. post (bool, optional): Make a POST request instead of a GET request. Only applicable if the URL starts with "http" or "https". Defaults to `False`. """ if not os.path.exists(target): with wbml.out.Section("Downloading file"): wbml.out.kv("Source", url) wbml.out.kv("Target", target) # Ensure that all directories in the path exist. make_dirs(target) # If the URL starts with "ftp", use the :mod:`urllib` library. if url.startswith("ftp"): with closing(urllib.request.urlopen(url, **kw_args)) as r: with open(target, "wb") as f: shutil.copyfileobj(r, f) # By default, use the :mod:`requests` library. else: request = requests.post if post else requests.get with request(url, stream=True, **kw_args) as r: with open(target, "wb") as f: shutil.copyfileobj(r.raw, f) def dependency(target, source, commands): """Specify a dependency that is generated from an existing file. Args: target (str): Target file. source (str): Source file. commands (list[str]): List of commands to generate target file. """ if not os.path.exists(target): with wbml.out.Section("Generating file"): wbml.out.kv("Source", source) wbml.out.kv("Target", target) # Check that the source exists. if not os.path.exists(source): raise DependencyError( f'Source "{source}" asserted to exist, but it does not.' ) # Save current working directory. current_wd = os.getcwd() # Ensure that all directories in the path exist. make_dirs(target) # Perform commands. for command in commands: wbml.out.out(command) # Change working directory to directory of target file, run # command, and restore working directory afterwards. os.chdir(os.path.dirname(target)) subprocess.call(command, shell=True) os.chdir(current_wd) def asserted_dependency(target): """Specify a dependency that cannot be fetched. Args: target (str): Target file. """ if not os.path.exists(target): raise DependencyError( f'Dependency "{target}" is asserted to exist, ' f"but it does not, and it cannot be " f"automatically fetched. Please put the file " f"into place manually." ) def make_dirs(path): """Make the directories in the path of a file. Args: path (url): Path of a file. """ os.makedirs(os.path.dirname(path), exist_ok=True) def data_path(*xs): """Get the path of a data file. Args: *xs (str): Parts of the path. Returns: str: Absolute path. """ return os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, "data", *xs) ) def split_df(df, index_range, columns, iloc=False): """Split a data frame by selecting from columns a particular range. Args: df (:class:`pd.DataFrame`): Data frame to split. index_range (tuple): Tuple containing lower and upper limit of the range to split the index by. If `index_range = (a, b)`, then `[a, b)` is taken. columns (list[object]): Columns to select. iloc (bool, optional): The index range is the integer location instead of the index value. Defaults to `False`. 
Returns: tuple[:class:`pd.DataFrame`]: Selected rows from selected columns and the remainder. """ if iloc: inds = np.arange(df.shape[0]) rows = (inds >= index_range[0]) & (inds < index_range[1]) else: rows = (df.index >= index_range[0]) & (df.index < index_range[1]) selected = pd.DataFrame([df[name][rows] for name in columns]).T remainder = pd.DataFrame( [df[name][~rows] for name in columns] + [df[name] for name in set(df.columns) - set(columns)] ).T # Fix order of columns. selected_inds = [i for i, c in enumerate(df.columns) if c in columns] selected = selected.reindex(df.columns[np.array(selected_inds)], axis=1) remainder = remainder.reindex(df.columns, axis=1) return selected, remainder def date_to_decimal_year(date, format=None): """Convert a date to decimal year. Args: date (str): Date as a string. format (str, optional): Format of the date if a conversion is needed. Returns: float: Decimal year corresponding to the date. """ if format: date = datetime.datetime.strptime(date, format) start = datetime.date(date.year, 1, 1).toordinal() year_length = datetime.date(date.year + 1, 1, 1).toordinal() - start # Account for subday time. subday_time = 0 if hasattr(date, "hour"): subday_time += date.hour / year_length / 24 if hasattr(date, "minute"): subday_time += date.minute / year_length / 24 / 60 if hasattr(date, "second"): subday_time += date.second / year_length / 24 / 60 / 60 return date.year + float(date.toordinal() - start) / year_length + subday_time
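A quick usage sketch for `date_to_decimal_year`, assuming the function above is importable (the module path is not shown in the record). Although the docstring types the argument as `str`, a `datetime.date` or `datetime.datetime` can be passed directly when `format` is omitted, and sub-day time is folded into the fraction via the `hasattr` checks.

import datetime

print(date_to_decimal_year("2020-07-01", format="%Y-%m-%d"))
# ~2020.497: day 182 of a 366-day leap year

print(date_to_decimal_year(datetime.datetime(2021, 1, 1, 12, 0)))
# ~2021.0014: ordinal offset 0 plus 12 hours of sub-day time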
__call__
Call function to augment common fields in results. Args: results (dict): Result dict contains the data to augment. Returns: dict: The result dict contains the data that is augmented with different scales and flips.
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import warnings from copy import deepcopy from mmdet.datasets.builder import PIPELINES from mmdet.datasets.pipelines import Compose @PIPELINES.register_module() class MultiScaleFlipAug3D(object): """Test-time augmentation with multiple scales and flipping. Args: transforms (list[dict]): Transforms to apply in each augmentation. img_scale (tuple | list[tuple]: Images scales for resizing. pts_scale_ratio (float | list[float]): Points scale ratios for resizing. flip (bool): Whether apply flip augmentation. Defaults to False. flip_direction (str | list[str]): Flip augmentation directions for images, options are "horizontal" and "vertical". If flip_direction is list, multiple flip augmentations will be applied. It has no effect when ``flip == False``. Defaults to "horizontal". pcd_horizontal_flip (bool): Whether apply horizontal flip augmentation to point cloud. Defaults to True. Note that it works only when 'flip' is turned on. pcd_vertical_flip (bool): Whether apply vertical flip augmentation to point cloud. Defaults to True. Note that it works only when 'flip' is turned on. """ def __init__(self, transforms, img_scale, pts_scale_ratio, flip=False, flip_direction='horizontal', pcd_horizontal_flip=False, pcd_vertical_flip=False): self.transforms = Compose(transforms) self.img_scale = img_scale if isinstance(img_scale, list) else [img_scale] self.pts_scale_ratio = pts_scale_ratio \ if isinstance(pts_scale_ratio, list) else[float(pts_scale_ratio)] assert mmcv.is_list_of(self.img_scale, tuple) assert mmcv.is_list_of(self.pts_scale_ratio, float) self.flip = flip self.pcd_horizontal_flip = pcd_horizontal_flip self.pcd_vertical_flip = pcd_vertical_flip self.flip_direction = flip_direction if isinstance( flip_direction, list) else [flip_direction] assert mmcv.is_list_of(self.flip_direction, str) if not self.flip and self.flip_direction != ['horizontal']: warnings.warn( 'flip_direction has no effect when flip is set to False') if (self.flip and not any([(t['type'] == 'RandomFlip3D' or t['type'] == 'RandomFlip') for t in transforms])): warnings.warn( 'flip has no effect when RandomFlip is not in transforms') # MASKED: __call__ function (lines 66-111) def __repr__(self): """str: Return a string that describes the module.""" repr_str = self.__class__.__name__ repr_str += f'(transforms={self.transforms}, ' repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, ' repr_str += f'flip_direction={self.flip_direction})' return repr_str
def __call__(self, results): """Call function to augment common fields in results. Args: results (dict): Result dict contains the data to augment. Returns: dict: The result dict contains the data that is augmented with \ different scales and flips. """ aug_data = [] # modified from `flip_aug = [False, True] if self.flip else [False]` # to reduce unnecessary scenes when using double flip augmentation # during test time flip_aug = [True] if self.flip else [False] pcd_horizontal_flip_aug = [False, True] \ if self.flip and self.pcd_horizontal_flip else [False] pcd_vertical_flip_aug = [False, True] \ if self.flip and self.pcd_vertical_flip else [False] for scale in self.img_scale: for pts_scale_ratio in self.pts_scale_ratio: for flip in flip_aug: for pcd_horizontal_flip in pcd_horizontal_flip_aug: for pcd_vertical_flip in pcd_vertical_flip_aug: for direction in self.flip_direction: # results.copy will cause bug # since it is shallow copy _results = deepcopy(results) _results['scale'] = scale _results['flip'] = flip _results['pcd_scale_factor'] = \ pts_scale_ratio _results['flip_direction'] = direction _results['pcd_horizontal_flip'] = \ pcd_horizontal_flip _results['pcd_vertical_flip'] = \ pcd_vertical_flip data = self.transforms(_results) aug_data.append(data) # list of dict to dict of list aug_data_dict = {key: [] for key in aug_data[0]} for data in aug_data: for key, val in data.items(): aug_data_dict[key].append(val) return aug_data_dict
66
111
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import warnings from copy import deepcopy from mmdet.datasets.builder import PIPELINES from mmdet.datasets.pipelines import Compose @PIPELINES.register_module() class MultiScaleFlipAug3D(object): """Test-time augmentation with multiple scales and flipping. Args: transforms (list[dict]): Transforms to apply in each augmentation. img_scale (tuple | list[tuple]: Images scales for resizing. pts_scale_ratio (float | list[float]): Points scale ratios for resizing. flip (bool): Whether apply flip augmentation. Defaults to False. flip_direction (str | list[str]): Flip augmentation directions for images, options are "horizontal" and "vertical". If flip_direction is list, multiple flip augmentations will be applied. It has no effect when ``flip == False``. Defaults to "horizontal". pcd_horizontal_flip (bool): Whether apply horizontal flip augmentation to point cloud. Defaults to True. Note that it works only when 'flip' is turned on. pcd_vertical_flip (bool): Whether apply vertical flip augmentation to point cloud. Defaults to True. Note that it works only when 'flip' is turned on. """ def __init__(self, transforms, img_scale, pts_scale_ratio, flip=False, flip_direction='horizontal', pcd_horizontal_flip=False, pcd_vertical_flip=False): self.transforms = Compose(transforms) self.img_scale = img_scale if isinstance(img_scale, list) else [img_scale] self.pts_scale_ratio = pts_scale_ratio \ if isinstance(pts_scale_ratio, list) else[float(pts_scale_ratio)] assert mmcv.is_list_of(self.img_scale, tuple) assert mmcv.is_list_of(self.pts_scale_ratio, float) self.flip = flip self.pcd_horizontal_flip = pcd_horizontal_flip self.pcd_vertical_flip = pcd_vertical_flip self.flip_direction = flip_direction if isinstance( flip_direction, list) else [flip_direction] assert mmcv.is_list_of(self.flip_direction, str) if not self.flip and self.flip_direction != ['horizontal']: warnings.warn( 'flip_direction has no effect when flip is set to False') if (self.flip and not any([(t['type'] == 'RandomFlip3D' or t['type'] == 'RandomFlip') for t in transforms])): warnings.warn( 'flip has no effect when RandomFlip is not in transforms') def __call__(self, results): """Call function to augment common fields in results. Args: results (dict): Result dict contains the data to augment. Returns: dict: The result dict contains the data that is augmented with \ different scales and flips. 
""" aug_data = [] # modified from `flip_aug = [False, True] if self.flip else [False]` # to reduce unnecessary scenes when using double flip augmentation # during test time flip_aug = [True] if self.flip else [False] pcd_horizontal_flip_aug = [False, True] \ if self.flip and self.pcd_horizontal_flip else [False] pcd_vertical_flip_aug = [False, True] \ if self.flip and self.pcd_vertical_flip else [False] for scale in self.img_scale: for pts_scale_ratio in self.pts_scale_ratio: for flip in flip_aug: for pcd_horizontal_flip in pcd_horizontal_flip_aug: for pcd_vertical_flip in pcd_vertical_flip_aug: for direction in self.flip_direction: # results.copy will cause bug # since it is shallow copy _results = deepcopy(results) _results['scale'] = scale _results['flip'] = flip _results['pcd_scale_factor'] = \ pts_scale_ratio _results['flip_direction'] = direction _results['pcd_horizontal_flip'] = \ pcd_horizontal_flip _results['pcd_vertical_flip'] = \ pcd_vertical_flip data = self.transforms(_results) aug_data.append(data) # list of dict to dict of list aug_data_dict = {key: [] for key in aug_data[0]} for data in aug_data: for key, val in data.items(): aug_data_dict[key].append(val) return aug_data_dict def __repr__(self): """str: Return a string that describes the module.""" repr_str = self.__class__.__name__ repr_str += f'(transforms={self.transforms}, ' repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, ' repr_str += f'flip_direction={self.flip_direction})' return repr_str